266 files changed, 1692 insertions, 2103 deletions
diff --git a/.github/workflows/list_missing.yml b/.github/workflows/list_missing.yml deleted file mode 100644 index 0ed7e01..0000000 --- a/.github/workflows/list_missing.yml +++ /dev/null @@ -1,21 +0,0 @@ -on: [push, pull_request] - -jobs: - missing: - name: List missing - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh --missing - - run: ./test/test.sh --coverage >> $GITHUB_ENV - - uses: schneegans/dynamic-badges-action@v1.7.0 - with: - auth: ${{ secrets.GIST_COVERAGE_SECRET }} - gistID: 73fb3c58350c58b623f221fc237def62 - filename: tcr_coverage.json - label: coverage - message: ${{ env.COVERED }}/${{ env.TOTAL }} - namedLogo: GitHub - valColorRange: ${{ env.TOTAL }} - minColorRange: ${{ env.REQUIRED }} - maxColorRange: ${{ env.TOTAL }} diff --git a/.github/workflows/test_all.yml b/.github/workflows/test_all.yml deleted file mode 100644 index 570063f..0000000 --- a/.github/workflows/test_all.yml +++ /dev/null @@ -1,14 +0,0 @@ -on: - workflow_dispatch: - -jobs: - all: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test all (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 50 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh diff --git a/.github/workflows/test_datastructures.yml b/.github/workflows/test_datastructures.yml deleted file mode 100644 index e5383c5..0000000 --- a/.github/workflows/test_datastructures.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/datastructures/**' - - 'test/datastructures/**' - pull_request: - paths: - - 'content/datastructures/**' - - 'test/datastructures/**' - workflow_dispatch: - -jobs: - datastructures: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test datastructures (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 20 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh datastructures diff --git a/.github/workflows/test_geometry.yml b/.github/workflows/test_geometry.yml deleted file mode 100644 index 09dbb6f..0000000 --- a/.github/workflows/test_geometry.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/geometry/**' - - 'test/geometry/**' - pull_request: - paths: - - 'content/geometry/**' - - 'test/geometry/**' - workflow_dispatch: - -jobs: - geometry: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test geometry (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 20 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh geometry diff --git a/.github/workflows/test_graph.yml b/.github/workflows/test_graph.yml deleted file mode 100644 index 53a5d6e..0000000 --- a/.github/workflows/test_graph.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/graph/**' - - 'test/graph/**' - pull_request: - paths: - - 'content/graph/**' - - 'test/graph/**' - workflow_dispatch: - -jobs: - graph: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test graph (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh graph diff --git a/.github/workflows/test_math.yml b/.github/workflows/test_math.yml deleted file mode 100644 index f9622e5..0000000 --- a/.github/workflows/test_math.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/math/**' - - 'test/math/**' - pull_request: - paths: - - 'content/math/**' - - 'test/math/**' - workflow_dispatch: - -jobs: - math: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test math (${{ 
matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 30 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh math diff --git a/.github/workflows/test_other.yml b/.github/workflows/test_other.yml deleted file mode 100644 index 14c0550..0000000 --- a/.github/workflows/test_other.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/other/**' - - 'test/other/**' - pull_request: - paths: - - 'content/other/**' - - 'test/other/**' - workflow_dispatch: - -jobs: - other: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test other (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 10 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh other diff --git a/.github/workflows/test_pdf.yml b/.github/workflows/test_pdf.yml deleted file mode 100644 index ab273f7..0000000 --- a/.github/workflows/test_pdf.yml +++ /dev/null @@ -1,39 +0,0 @@ -on: - push: - paths: - - 'content/**' - - 'Makefile' - pull_request: - paths: - - 'content/**' - - 'Makefile' - workflow_dispatch: - -jobs: - pdf_22-04: - name: Test pdf (ubuntu-22.04) - runs-on: ubuntu-22.04 - timeout-minutes: 5 - steps: - - uses: actions/checkout@v4 - - run: | - sudo apt-get update - sudo apt-get install latexmk texlive-latex-base texlive-latex-recommended texlive-latex-extra texlive-lang-german texlive-fonts-extra - - run: make - - pdf_latest: - name: Test pdf (ubuntu-latest) - runs-on: ubuntu-22.04 - timeout-minutes: 5 - steps: - - uses: actions/checkout@v4 - - run: | - sudo apt-get update - sudo apt-get install latexmk texlive-latex-base texlive-latex-recommended texlive-latex-extra texlive-lang-german texlive-fonts-extra - - run: make - - uses: exuanbo/actions-deploy-gist@v1 - with: - token: ${{ secrets.GIST_COVERAGE_SECRET }} - gist_id: 73fb3c58350c58b623f221fc237def62 - file_path: tcr.pdf - file_type: binary diff --git a/.github/workflows/test_string.yml b/.github/workflows/test_string.yml deleted file mode 100644 index 0d79040..0000000 --- a/.github/workflows/test_string.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/string/**' - - 'test/string/**' - pull_request: - paths: - - 'content/string/**' - - 'test/string/**' - workflow_dispatch: - -jobs: - string: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test string (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 10 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh string diff --git a/.github/workflows/test_template.yml b/.github/workflows/test_template.yml deleted file mode 100644 index 01f57bb..0000000 --- a/.github/workflows/test_template.yml +++ /dev/null @@ -1,22 +0,0 @@ -on: - push: - paths: - - 'content/template/**' - - 'test/template/**' - pull_request: - paths: - - 'content/template/**' - - 'test/template/**' - workflow_dispatch: - -jobs: - template: - strategy: - matrix: - os: [ubuntu-latest, ubuntu-22.04] - name: Test template (${{ matrix.os }}) - runs-on: ${{ matrix.os }} - timeout-minutes: 10 - steps: - - uses: actions/checkout@v4 - - run: ./test/test.sh template @@ -221,9 +221,9 @@ TSWLatexianTemp* *~ -# ignore build dir -build/* -# dont ignore build tcr -!tcr.pdf +# files produced by the testing system +*.test +*.ok +*.d # ignore build test awk files test/awk/* @@ -1,4 +1,26 @@ -all: - cd content; latexmk -pdf tcr -output-directory=.. 
-aux-directory=../build/ -usepretex="\newcommand{\gitorigin}{https://github.com/mzuenni/ContestReference/tree/$(shell git branch --show-current)/content/}" -clean: - rm -r build/* +LATEXMK = latexmk -interaction=nonstopmode + +tcr.pdf: FORCE + cd content && $(LATEXMK) + +tcr-opt.pdf: FORCE + cd content && $(LATEXMK) -r latexmk.opt + +pdf: tcr.pdf tcr-opt.pdf + +all: pdf test + +test: + +gmake -C test + +clean: cleanpdf cleantest + +cleanpdf: + cd content && $(LATEXMK) -C + cd content && $(LATEXMK) -r latexmk.opt -C + +cleantest: + +-gmake -C test clean + +FORCE: +.PHONY: all pdf test clean cleanpdf cleantest FORCE diff --git a/README.md b/README.md deleted file mode 100644 index 7edf67b..0000000 --- a/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# KIT Team Contest Reference
-> [!TIP]
-> You can use this [pdf.js link](https://mozilla.github.io/pdf.js/web/viewer.html?file=https://raw.githubusercontent.com/mzuenni/ContestReference/new-master/tcr.pdf) to view the committed pdf with working links,
-> or [this one](https://mozilla.github.io/pdf.js/web/viewer.html?file=https://gist.githubusercontent.com/mzuenni/73fb3c58350c58b623f221fc237def62/raw/tcr.pdf) to look at the current build.
-
-The KIT teams have used this document for ICPC-style contests since roughly 2019.
-It consists of 25 pages of copy-pasteable C++ code and one extra page with a checklist for the practice session.
-
-## Testing
-To make this document as useful as possible, we try to (automatically) stress test all code in this repository.
-Nonetheless, not all code is tested and tests might not catch every bug.
-If you find a bug, please [open an issue](https://github.com/mzuenni/ContestReference/issues/new).
-If you think code can be changed, improved, or replaced, also feel free to open an issue or a pull request.
-
-[](https://github.com/mzuenni/ContestReference/actions/workflows/test_all.yml/)
-[](https://github.com/mzuenni/ContestReference/actions/workflows/test_pdf.yml/)
-[](https://github.com/mzuenni/ContestReference/actions/workflows/list_missing.yml)
-## Other Resources
-The code in this repo has been accumulated over many years and the origin of the code is unfortunately unknown for most of the snippets.
-Even though much code is written from scratch, plenty of code has been copied from others and just adjusted to our coding style.
-Here is an (incomplete) list of resources that we use (besides those from previous versions):
- - https://github.com/indy256/codelibrary
- - https://github.com/spaghetti-source/algorithm
- - https://github.com/kth-competitive-programming/kactl
-
-## Previous Versions
-- https://github.com/mzuenni/ContestReference/tree/master (2018-2019)
-- https://github.com/pjungeblut/ChaosKITs (2016-2018)
-- https://github.com/niklasb/contest-algos (2012-2016)
-
-## Testing Status
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_datastructures.yml/)
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_geometry.yml/)
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_graph.yml/)
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_math.yml/)
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_other.yml/)
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_string.yml/)
- - [](https://github.com/mzuenni/ContestReference/actions/workflows/test_template.yml/)
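The datastructures changes below rework several snippet APIs; the most visible one is that the lazy-propagation segment tree's lower_bound is replaced by a predicate-based binary_search (smallest x in [l, r] with f(query(l, x)), f monotone in x). A minimal usage sketch, assuming the updated SegTree from lazyPropagation.cpp with its default sum combine is in scope; the input values and predicate are illustrative only and not part of the diff:

  // assumes the contest template (using ll = long long) and the new
  // SegTree from lazyPropagation.cpp with comb = sum, E = 0
  vector<ll> a = {1, 2, 3, 4, 5};
  SegTree st(a);
  // smallest x in [0, 5] with sum over a[0..x) >= 6  ->  x == 3 (1+2+3)
  int x = st.binary_search(0, 5, [](ll s) { return s >= 6; });
  // predicate never satisfied -> returns -1
  int y = st.binary_search(0, 5, [](ll s) { return s >= 100; });

The predicate is applied to the aggregate of the half-open prefix [l, x), so it must be monotone in x for the search to be well-defined.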
diff --git a/content/datastructures/datastructures.tex b/content/datastructures/datastructures.tex index c9f3d2a..38d63d9 100644 --- a/content/datastructures/datastructures.tex +++ b/content/datastructures/datastructures.tex @@ -10,7 +10,7 @@ \subsubsection{Lazy Propagation} Assignment modifications, sum queries \\ - \method{lower\_bound}{erster Index in $[l, r)$ $\geq$ x (erfordert max-combine)}{\log(n)} + \method{binary\_search}{kleinstes $x$ in $[l, r]$ mit $f(\text{query}(l, x))$ (monoton in $x$)}{\log(n)} \sourcecode{datastructures/lazyPropagation.cpp} \end{algorithm} @@ -20,6 +20,8 @@ \method{kth}{sort $[l, r)[k]$}{\log(\Sigma)} \method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(\Sigma)} \end{methods} + $\Sigma$ ist die Gr\"o\ss e des Eingabebereichs, d.h. + $\mathit{max} - \mathit{min}$. \sourcecode{datastructures/waveletTree.cpp} \end{algorithm} \columnbreak @@ -27,15 +29,15 @@ \begin{algorithm}{Fenwick Tree} \begin{methods} \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von $[0, i]$}{\log(n)} + \method{prefix\_sum}{summe von $[0, i)$}{\log(n)} \method{update}{addiert ein Delta zu einem Element}{\log(n)} \end{methods} \sourcecode{datastructures/fenwickTree.cpp} \begin{methods} \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von [$0, i]$}{\log(n)} - \method{update}{addiert ein Delta zu allen Elementen $[l, r)$. $l\leq r$!}{\log(n)} + \method{prefix\_sum}{summe von $[0, i)$}{\log(n)} + \method{update}{addiert ein Delta zu allen Elementen $[l, r)$}{\log(n)} \end{methods} \sourcecode{datastructures/fenwickTree2.cpp} \end{algorithm} @@ -56,7 +58,7 @@ \begin{algorithm}{Range Minimum Query} \begin{methods} \method{init}{baut Struktur auf}{n\*\log(n)} - \method{queryIdempotent}{Index des Minimums in $[l, r)$. $l<r$!}{1} + \method{query}{Index des Minimums in $[l, r)$}{1} \end{methods} \begin{itemize} \item \code{better}-Funktion muss idempotent sein! @@ -64,11 +66,19 @@ \sourcecode{datastructures/sparseTable.cpp} \end{algorithm} +\begin{algorithm}[optional]{Range Aggregate Query} + \begin{methods} + \method{init}{baut Struktur auf}{n\*\log(n)} + \method{query}{Aggregat über $[l,r)$}{1} + \end{methods} + \sourcecode{datastructures/sparseTableDisjoint.cpp} +\end{algorithm} + \begin{algorithm}{STL-Bitset} \sourcecode{datastructures/bitset.cpp} \end{algorithm} -\begin{algorithm}{Link-Cut-Tree} +\begin{algorithm}{Link/Cut Tree} \begin{methods} \method{LCT}{baut Wald auf}{n} \method{connected}{prüft ob zwei Knoten im selben Baum liegen}{\log(n)} @@ -80,31 +90,45 @@ \end{methods} \sourcecode{datastructures/LCT.cpp} \end{algorithm} -\clearpage +\columnbreak -\begin{algorithm}{Lichao} - \sourcecode{datastructures/lichao.cpp} +\begin{algorithm}{Lower Envelope (Convex Hull Optimization)} + Um aus einem Lower Envelope einen Upper Envelope zu machen (oder + umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. + \subsubsection{Monotonic} + \begin{methods} + \method{add}{add line $mx + b$, $m$ is decreasing}{1} + \method{query}{minimum value at $x$, $x$ is increasing}{1} + \end{methods} + \sourcecode{datastructures/monotonicConvexHull.cpp} + \subsubsection{Dynamic} + \begin{methods} + \method{add}{add line $mx + b$}{\log(n)} + \method{query}{minimum value at $x$}{\log(n)} + \end{methods} + \sourcecode{datastructures/dynamicConvexHull.cpp} + \subsubsection{Li Chao Tree} + Every pair of functions has at most one intersection. 
+ + \begin{methods} + \method{insert}{add function}{\log(|xs|)} + \method{query}{minimum value at $x$, $x \in xs$}{\log(|xs|)} + \end{methods} + \sourcecode{datastructures/lichao.cpp} \end{algorithm} \begin{algorithm}{Policy Based Data Structures} - \textbf{Wichtig:} Verwende \code{p.swap(p2)} anstatt \code{swap(p, p2)}! - \sourcecode{datastructures/stlPriorityQueue.cpp} - \columnbreak \sourcecode{datastructures/pbds.cpp} \end{algorithm} -\begin{algorithm}{Lower/Upper Envelope (Convex Hull Optimization)} - Um aus einem lower envelope einen upper envelope zu machen (oder umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. - \sourcecode{datastructures/monotonicConvexHull.cpp} - \sourcecode{datastructures/dynamicConvexHull.cpp} -\end{algorithm} - \begin{algorithm}{Union-Find} \begin{methods} - \method{init}{legt $n$ einzelne Unions an}{n} - \method{findSet}{findet den Repräsentanten}{\log(n)} - \method{unionSets}{vereint 2 Mengen}{\log(n)} - \method{m\*findSet + n\*unionSets}{Folge von Befehlen}{n+m\*\alpha(n)} + \method{UnionFind}{legt $n$ einzelne Elemente an}{n} + \method{find}{findet den Repräsentanten}{\log(n)} + \method{link}{vereint 2 Mengen}{\log(n)} + \method{size}{zählt Elemente in Menge, die $a$ enthält}{\log(n)} + \method{add}{fügt neues einzelnes Element ein}{1} + \method{m\*find + n\*link}{Folge von Befehlen}{n+m\*\alpha(n)} \end{methods} \sourcecode{datastructures/unionFind.cpp} \end{algorithm} diff --git a/content/datastructures/dynamicConvexHull.cpp b/content/datastructures/dynamicConvexHull.cpp index 63e0e13..3e4020e 100644 --- a/content/datastructures/dynamicConvexHull.cpp +++ b/content/datastructures/dynamicConvexHull.cpp @@ -1,16 +1,16 @@ struct Line { mutable ll m, c, p; - bool operator<(const Line& o) const {return m < o.m;} - bool operator<(ll x) const {return p < x;} + bool operator<(const Line& o) const { return m > o.m; } + bool operator<(ll x) const { return p < x; } }; -struct HullDynamic : multiset<Line, less<>> { // max über Geraden +struct HullDynamic : multiset<Line, less<>> { // min über Geraden // (for doubles, use INF = 1/.0, div(a,c) = a/c) - ll div(ll a, ll c) {return a / c - ((a ^ c) < 0 && a % c);} + ll div(ll a, ll c) { return a / c - ((a ^ c) < 0 && a % c); } bool isect(iterator x, iterator y) { - if (y == end()) {x->p = INF; return false;} - if (x->m == y->m) x->p = x->c > y->c ? INF : -INF; + if (y == end()) { x->p = INF; return false; } + if (x->m == y->m) x->p = x->c < y->c ? 
INF : -INF; else x->p = div(y->c - x->c, x->m - y->m); return x->p >= y->p; } @@ -19,13 +19,11 @@ struct HullDynamic : multiset<Line, less<>> { // max über Geraden auto x = insert({m, c, 0}); while (isect(x, next(x))) erase(next(x)); if (x != begin()) { - x--; - if (isect(x, next(x))) { - erase(next(x)); - isect(x, next(x)); - }} + --x; + while (isect(x, next(x))) erase(next(x)); + } while (x != begin() && prev(x)->p >= x->p) { - x--; + --x; isect(x, erase(next(x))); }} diff --git a/content/datastructures/fenwickTree.cpp b/content/datastructures/fenwickTree.cpp index eb5cd73..7013613 100644 --- a/content/datastructures/fenwickTree.cpp +++ b/content/datastructures/fenwickTree.cpp @@ -1,7 +1,7 @@ vector<ll> tree; void update(int i, ll val) { - for (i++; i < sz(tree); i += i & -i) tree[i] += val; + for (i++; i < ssize(tree); i += i & -i) tree[i] += val; } void init(int n) { @@ -10,6 +10,6 @@ void init(int n) { ll prefix_sum(int i) { ll sum = 0; - for (i++; i > 0; i -= i & -i) sum += tree[i]; + for (; i > 0; i &= i-1) sum += tree[i]; return sum; } diff --git a/content/datastructures/fenwickTree2.cpp b/content/datastructures/fenwickTree2.cpp index 9384e3c..7fcdbb9 100644 --- a/content/datastructures/fenwickTree2.cpp +++ b/content/datastructures/fenwickTree2.cpp @@ -1,21 +1,21 @@ vector<ll> add, mul; void update(int l, int r, ll val) { - for (int tl = l + 1; tl < sz(add); tl += tl & -tl) + for (int tl = l + 1; tl < ssize(add); tl += tl & -tl) add[tl] += val, mul[tl] -= val * l; - for (int tr = r + 1; tr < sz(add); tr += tr & -tr) + for (int tr = r + 1; tr < ssize(add); tr += tr & -tr) add[tr] -= val, mul[tr] += val * r; } -void init(vector<ll>& v) { - mul.assign(sz(v) + 1, 0); - add.assign(sz(v) + 1, 0); - for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]); +void init(vector<ll> &v) { + mul.assign(size(v) + 1, 0); + add.assign(size(v) + 1, 0); + for(int i = 0; i < ssize(v); i++) update(i, i + 1, v[i]); } ll prefix_sum(int i) { - ll res = 0; i++; - for (int ti = i; ti > 0; ti -= ti & -ti) + ll res = 0; + for (int ti = i; ti > 0; ti &= ti-1) res += add[ti] * i + mul[ti]; return res; } diff --git a/content/datastructures/lazyPropagation.cpp b/content/datastructures/lazyPropagation.cpp index ab91364..a5be822 100644 --- a/content/datastructures/lazyPropagation.cpp +++ b/content/datastructures/lazyPropagation.cpp @@ -1,23 +1,22 @@ struct SegTree { using T = ll; using U = ll; - int n; static constexpr T E = 0; // Neutral element for combine - static constexpr U UF = INF; // Unused value by updates - vector<T> tree; + static constexpr U UF = 1e18; // Unused value by updates + int n; + vector<T> tree; vector<U> lazy; int h; - vector<U> lazy; - vector<int> k; // size of segments (optional) + vector<ll> k; // size of segments (optional) - SegTree(const vector<T>& a) : n(sz(a) + 1), tree(2 * n, E), + SegTree(const vector<T>& a) : n(ssize(a) + 1), tree(2 * n, E), //SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def), - h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) { - copy(all(a), tree.begin() + n); + lazy(n, UF), h(__lg(2 * n)), k(2 * n, 1) { + ranges::copy(a, tree.begin() + n); for (int i = n - 1; i > 0; i--) { k[i] = 2 * k[2 * i]; tree[i] = comb(tree[2 * i], tree[2 * i + 1]); }} - T comb(T a, T b) {return a + b;} // Modify this + E + T comb(T a, T b) { return a + b; } // Modify this + E void apply(int i, U val) { // And this + UF tree[i] = val * k[i]; @@ -44,17 +43,17 @@ struct SegTree { void update(int l, int r, U val) { l += n, r += n; int l0 = l, r0 = r; - push(l0), push(r0 - 1); + push(l0), 
push(r0); for (; l < r; l /= 2, r /= 2) { if (l&1) apply(l++, val); if (r&1) apply(--r, val); } - build(l0), build(r0 - 1); + build(l0), build(r0); } T query(int l, int r) { l += n, r += n; - push(l), push(r - 1); + push(l), push(r); T resL = E, resR = E; for (; l < r; l /= 2, r /= 2) { if (l&1) resL = comb(resL, tree[l++]); @@ -64,21 +63,23 @@ struct SegTree { } // Optional: - int lower_bound(int l, int r, T x) { + int binary_search(int l, int r, auto &&f) { + if (f(E)) return l; l += n, r += n; - push(l), push(r - 1); + push(l), push(r); int a[64] = {}, lp = 0, rp = 64; for (; l < r; l /= 2, r /= 2) { if (l&1) a[lp++] = l++; if (r&1) a[--rp] = --r; } - for (int i : a) if (i != 0 && tree[i] >= x) { // Modify this + T x = E, y = x; + for (int i : a) if (i != 0 && f(x = comb(y = x, tree[i]))) { while (i < n) { push_down(i); - if (tree[2 * i] >= x) i = 2 * i; // And this - else i = 2 * i + 1; + if (f(x = comb(y, tree[2*i]))) i = 2 * i; + else i = 2 * i + 1, y = x; } - return i - n; + return i - n + 1; } return -1; } diff --git a/content/datastructures/lichao.cpp b/content/datastructures/lichao.cpp index 9c41934..da965dd 100644 --- a/content/datastructures/lichao.cpp +++ b/content/datastructures/lichao.cpp @@ -1,9 +1,10 @@ vector<ll> xs; // IMPORTANT: Initialize before constructing! -int findX(ll i) {return lower_bound(all(xs), i) - begin(xs);} +int findX(ll i) { + return ranges::lower_bound(xs, i) - begin(xs); } -struct Fun { // Default: Linear function. Change as needed. +struct Fun { // Default: Linear function. Change as needed. ll m, c; - ll operator()(int x) {return m*xs[x] + c;} + ll operator()(int x) { return m*xs[x] + c; } }; // Default: Computes min. Change lines with comment for max. @@ -11,18 +12,18 @@ struct Lichao { static constexpr Fun id = {0, INF}; // {0, -INF} int n, cap; vector<Fun> seg; - Lichao() : n(sz(xs)), cap(2 << __lg(n)), seg(2 * cap, id) {} - + Lichao() : n(ssize(xs)), cap(2 << __lg(n)), seg(2 * cap, id) {} + void _insert(Fun f, int l, int r, int i) { while (i < 2 * cap) { int m = (l+r)/2; - if (m >= n) {r = m; i = 2*i; continue;} + if (m >= n) { r = m; i = 2*i; continue; } Fun &g = seg[i]; if (f(m) < g(m)) swap(f, g); // > if (f(l) < g(l)) r = m, i = 2*i; // > else l = m, i = 2*i+1; }} - void insert(Fun f) {_insert(f, 0, cap, 1);} + void insert(Fun f) { _insert(f, 0, cap, 1); } void _segmentInsert(Fun f, int l, int r, int a, int b, int i) { if (l <= a && b <= r) _insert(f, a, b, i); @@ -42,5 +43,5 @@ struct Lichao { } return ans; } - ll query(ll x) {return _query(findX(x));} + ll query(ll x) { return _query(findX(x)); } }; diff --git a/content/datastructures/monotonicConvexHull.cpp b/content/datastructures/monotonicConvexHull.cpp index f1721ae..295acc4 100644 --- a/content/datastructures/monotonicConvexHull.cpp +++ b/content/datastructures/monotonicConvexHull.cpp @@ -1,27 +1,25 @@ -// Min über Geraden mit MONOTONEN Inserts UND Queries. Jede neue -// Gerade hat kleineres pair(m, c) als alle vorherigen. 
-struct Line { - ll m, c; - ll operator()(ll x) {return m*x+c;} -}; +struct Envelope { + struct Line { + ll m, b; + ll operator()(ll x) { return m*x+b; } + }; -vector<Line> ls; -ll ptr = 0; + vector<Line> ls; + int ptr = 0; -bool bad(Line l1, Line l2, Line l3) { - return (l3.c-l1.c)*(l1.m-l2.m) < (l2.c-l1.c)*(l1.m-l3.m); -} + static bool bad(Line l1, Line l2, Line l3) { + return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m); + } -void add(ll m, ll c) { // m fallend, Laufzeit O(1) amortisiert - while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, c})) { - ls.pop_back(); + void add(ll m, ll b) { + while (ssize(ls) > 1 + && bad(ls.end()[-2], ls.back(), {m,b})) ls.pop_back(); + ls.push_back({m, b}); + ptr = min(ptr, (int)ssize(ls) - 1); } - ls.push_back({m, c}); - ptr = min(ptr, sz(ls) - 1); -} -ll query(ll x) { // x >= letztes x, Laufzeit: O(1) amortisiert - ptr = min(ptr, sz(ls) - 1); - while (ptr + 1 < sz(ls) && ls[ptr + 1](x) < ls[ptr](x)) ptr++; - return ls[ptr](x); -}
\ No newline at end of file + ll query(ll x) { + while (ptr < ssize(ls)-1 && ls[ptr+1](x) < ls[ptr](x)) ptr++; + return ls[ptr](x); + } +}; diff --git a/content/datastructures/pbds.cpp b/content/datastructures/pbds.cpp index de0ace6..734bf91 100644 --- a/content/datastructures/pbds.cpp +++ b/content/datastructures/pbds.cpp @@ -1,14 +1,22 @@ +#include <ext/pb_ds/priority_queue.hpp> +template<typename T> +using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>> +auto it = pq.push(5); // O(1) +pq.modify(it, 6); // O(log n) +pq.erase(it); // O(log n) +pq.join(pq2); // O(1) +pq.swap(pq2); // O(1) + #include <ext/pb_ds/assoc_container.hpp> using namespace __gnu_pbds; template<typename T> using Tree = tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>; -// T.order_of_key(x): number of elements strictly less than x -// *T.find_by_order(k): k-th element +T.order_of_key(x); // number of elements strictly less than x +auto it = T.find_by_order(k); // k-th element constexpr uint64_t RNG = ll(2e18 * acos(-1)) | 199; // random odd -template<typename T> -struct chash { +template<typename T> struct chash { size_t operator()(T o) const { return __builtin_bswap64(hash<T>()(o) * RNG); }}; diff --git a/content/datastructures/persistent.cpp b/content/datastructures/persistent.cpp index f26680d..ed2f891 100644 --- a/content/datastructures/persistent.cpp +++ b/content/datastructures/persistent.cpp @@ -1,18 +1,18 @@ -template<typename T>
-struct persistent {
- int& time;
- vector<pair<int, T>> data;
-
- persistent(int& time, T value = {})
- : time(time), data(1, {2*time, value}) {}
-
- T get(int t) {
- return prev(upper_bound(all(data),pair{2*t+1, T{}}))->second;
- }
-
- int set(T value) {
- time++;
- data.push_back({2*time, value});
- return time;
- }
-};
+template<typename T> +struct persistent { + int& time; + vector<pair<int, T>> data; + + persistent(int& time, T value = {}) + : time(time), data(1, {2*time, value}) {} + + T get(int t) { + return ranges::upper_bound(data,pair{2*t+1, T{}})[-1].second; + } + + int set(T value) { + time++; + data.push_back({2*time, value}); + return time; + } +}; diff --git a/content/datastructures/persistentArray.cpp b/content/datastructures/persistentArray.cpp index 8326700..903bd0e 100644 --- a/content/datastructures/persistentArray.cpp +++ b/content/datastructures/persistentArray.cpp @@ -1,24 +1,24 @@ -template<typename T>
-struct persistentArray {
- int time;
- vector<persistent<T>> data;
- vector<pair<int, int>> mods;
-
- persistentArray(int n, T value = {})
- : time(0), data(n, {time, value}) {}
-
- T get(int p, int t) {return data[p].get(t);}
-
- int set(int p, T value) {
- mods.push_back({p, data[p].set(value)});
- return mods.back().second;
- }
-
- void reset(int t) {
- while (!mods.empty() && mods.back().second > t) {
- data[mods.back().first].data.pop_back();
- mods.pop_back();
- }
- time = t;
- }
-};
+template<typename T> +struct persistentArray { + int time; + vector<persistent<T>> data; + vector<pair<int, int>> mods; + + persistentArray(int n, T value = {}) + : time(0), data(n, {time, value}) {} + + T get(int p, int t) { return data[p].get(t); } + + int set(int p, T value) { + mods.push_back({p, data[p].set(value)}); + return mods.back().second; + } + + void reset(int t) { + while (!mods.empty() && mods.back().second > t) { + data[mods.back().first].data.pop_back(); + mods.pop_back(); + } + time = t; + } +}; diff --git a/content/datastructures/segmentTree.cpp b/content/datastructures/segmentTree.cpp index 6b69d0b..1fbf886 100644 --- a/content/datastructures/segmentTree.cpp +++ b/content/datastructures/segmentTree.cpp @@ -4,14 +4,15 @@ struct SegTree { vector<T> tree; static constexpr T E = 0; // Neutral element for combine - SegTree(vector<T>& a) : n(sz(a)), tree(2 * n) { - //SegTree(int size, T val = E) : n(size), tree(2 * n, val) { - copy(all(a), tree.begin() + n); + SegTree(vector<T>& a) : n(ssize(a)), tree(2 * n, E) { + ranges::copy(a, tree.begin() + n); + //SegTree(int size, T val = E) : n(size), tree(2 * n, E) { + // fill(tree.begin() + n, tree.end(), val); for (int i = n - 1; i > 0; i--) { // remove for range update tree[i] = comb(tree[2 * i], tree[2 * i + 1]); }} - T comb(T a, T b) {return a + b;} // modify this + neutral + T comb(T a, T b) { return a + b; } // modify this + neutral void update(int i, T val) { tree[i += n] = val; // apply update code diff --git a/content/datastructures/sparseTable.cpp b/content/datastructures/sparseTable.cpp index b3f946e..5455ef5 100644 --- a/content/datastructures/sparseTable.cpp +++ b/content/datastructures/sparseTable.cpp @@ -6,17 +6,17 @@ struct SparseTable { return a[lidx] <= a[ridx] ? lidx : ridx; } - void init(vector<ll>* vec) { - int n = sz(*vec); - a = vec->data(); + void init(vector<ll> &vec) { + int n = ssize(vec); + a = vec.data(); st.assign(__lg(n) + 1, vector<int>(n)); - iota(all(st[0]), 0); + iota(begin(st[0]), end(st[0]), 0); for (int j = 0; (2 << j) <= n; j++) { for (int i = 0; i + (2 << j) <= n; i++) { st[j + 1][i] = better(st[j][i] , st[j][i + (1 << j)]); }}} - int queryIdempotent(int l, int r) { + int query(int l, int r) { if (r <= l) return -1; int j = __lg(r - l); //31 - builtin_clz(r - l); return better(st[j][l] , st[j][r - (1 << j)]); diff --git a/content/datastructures/sparseTableDisjoint.cpp b/content/datastructures/sparseTableDisjoint.cpp index 55165d4..bcf6b2e 100644 --- a/content/datastructures/sparseTableDisjoint.cpp +++ b/content/datastructures/sparseTableDisjoint.cpp @@ -7,16 +7,16 @@ struct DisjointST { return x + y; } - void init(vector<ll>* vec) { - int n = sz(*vec); - a = vec->data(); + void init(vector<ll> &vec) { + int n = ssize(vec); + a = vec.data(); dst.assign(__lg(n) + 1, vector<ll>(n + 1, neutral)); for (int h = 0, l = 1; l <= n; h++, l *= 2) { for (int c = l; c < n + l; c += 2 * l) { for (int i = c; i < min(n, c + l); i++) - dst[h][i + 1] = combine(dst[h][i], vec->at(i)); + dst[h][i + 1] = combine(dst[h][i], vec[i]); for (int i = min(n, c); i > c - l; i--) - dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]); + dst[h][i - 1] = combine(vec[i - 1], dst[h][i]); }}} ll query(int l, int r) { diff --git a/content/datastructures/stlHashMap.cpp b/content/datastructures/stlHashMap.cpp deleted file mode 100644 index b107dde..0000000 --- a/content/datastructures/stlHashMap.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include <ext/pb_ds/assoc_container.hpp> -using namespace __gnu_pbds; - -template<typename T> -struct 
betterHash { - size_t operator()(T o) const { - size_t h = hash<T>()(o) ^ 42394245; //random value - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h); - return h; -}}; - -template<typename K, typename V, typename H = betterHash<K>> -using hashMap = gp_hash_table<K, V, H>; -template<typename K, typename H = betterHash<K>> -using hashSet = gp_hash_table<K, null_type, H>; diff --git a/content/datastructures/stlPriorityQueue.cpp b/content/datastructures/stlPriorityQueue.cpp deleted file mode 100644 index 32b2455..0000000 --- a/content/datastructures/stlPriorityQueue.cpp +++ /dev/null @@ -1,8 +0,0 @@ -#include <ext/pb_ds/priority_queue.hpp> -template<typename T> -using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>> - -auto it = pq.push(5); -pq.modify(it, 6); -pq.join(pq2); -// push, join are O(1), pop, modify, erase O(log n) amortized diff --git a/content/datastructures/stlTree.cpp b/content/datastructures/stlTree.cpp deleted file mode 100644 index fbb68b9..0000000 --- a/content/datastructures/stlTree.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include <ext/pb_ds/assoc_container.hpp> -#include <ext/pb_ds/tree_policy.hpp> -using namespace std; using namespace __gnu_pbds; -template<typename T> -using Tree = tree<T, null_type, less<T>, rb_tree_tag, - tree_order_statistics_node_update>; - -int main() { - Tree<int> X; - for (int i : {1, 2, 4, 8, 16}) X.insert(i); - *X.find_by_order(3); // => 8 - X.order_of_key(10); // => 4 = min i, mit X[i] >= 10 -} diff --git a/content/datastructures/treap.cpp b/content/datastructures/treap.cpp index c5a60e9..bddfdb4 100644 --- a/content/datastructures/treap.cpp +++ b/content/datastructures/treap.cpp @@ -66,7 +66,7 @@ struct Treap { void insert(int i, ll val) { // and i = val auto [left, right] = split(root, i); treap.emplace_back(val); - left = merge(left, sz(treap) - 1); + left = merge(left, ssize(treap) - 1); root = merge(left, right); } diff --git a/content/datastructures/unionFind.cpp b/content/datastructures/unionFind.cpp index 1ee5178..8861790 100644 --- a/content/datastructures/unionFind.cpp +++ b/content/datastructures/unionFind.cpp @@ -1,26 +1,26 @@ -// unions[i] >= 0 => unions[i] = parent -// unions[i] < 0 => unions[i] = -size -vector<int> unions; +struct UnionFind { + vector<int> unions; // unions[i] = parent or unions[i] = -size -void init(int n) { //Initialisieren - unions.assign(n, -1); -} + UnionFind(int n): unions(n, -1) {} -int findSet(int a) { // Pfadkompression - if (unions[a] < 0) return a; - return unions[a] = findSet(unions[a]); -} + int find(int a) { + return unions[a] < 0 ? a : unions[a] = find(unions[a]); + } -void linkSets(int a, int b) { // Union by size. - if (unions[b] > unions[a]) swap(a, b); - unions[b] += unions[a]; - unions[a] = b; -} + bool link(int a, int b) { + if ((a = find(a)) == (b = find(b))) return false; + if (unions[b] > unions[a]) swap(a, b); + unions[b] += unions[a]; + unions[a] = b; + return true; + } -void unionSets(int a, int b) { // Diese Funktion aufrufen. 
- if (findSet(a) != findSet(b)) linkSets(findSet(a), findSet(b)); -} + int size(int a) { // optional + return -unions[find(a)]; + } -int size(int a) { // optional - return -unions[findSet(a)]; -} + int add() { // optional + unions.push_back(-1); + return ssize(unions) - 1; + } +}; diff --git a/content/datastructures/waveletTree.cpp b/content/datastructures/waveletTree.cpp index 090cdb2..55167b6 100644 --- a/content/datastructures/waveletTree.cpp +++ b/content/datastructures/waveletTree.cpp @@ -1,25 +1,20 @@ struct WaveletTree { - using it = vector<ll>::iterator; - WaveletTree *ln = nullptr, *rn = nullptr; + unique_ptr<WaveletTree> ln, rn; vector<int> b = {0}; ll lo, hi; - WaveletTree(vector<ll> in) : WaveletTree(all(in)) {} - - WaveletTree(it from, it to) : // call above one - lo(*min_element(from, to)), hi(*max_element(from, to) + 1) { + WaveletTree(auto in) : lo(*ranges::min_element(in)), + hi(*ranges::max_element(in) + 1) { ll mid = (lo + hi) / 2; - auto f = [&](ll x) {return x < mid;}; - for (it c = from; c != to; c++) { - b.push_back(b.back() + f(*c)); - } + auto f = [&](ll x) { return x < mid; }; + for (ll x: in) b.push_back(b.back() + f(x)); if (lo + 1 >= hi) return; - it pivot = stable_partition(from, to, f); - ln = new WaveletTree(from, pivot); - rn = new WaveletTree(pivot, to); + auto right = ranges::stable_partition(in, f); + ln = make_unique<WaveletTree>( + ranges::subrange(begin(in), begin(right))); + rn = make_unique<WaveletTree>(right); } - // kth element in sort[l, r) all 0-indexed ll kth(int l, int r, int k) { if (k < 0 || l + k >= r) return -1; if (lo + 1 >= hi) return lo; @@ -28,13 +23,10 @@ struct WaveletTree { else return rn->kth(l-b[l], r-b[r], k-inLeft); } - // count elements in[l, r) smaller than k int countSmaller(int l, int r, ll k) { if (l >= r || k <= lo) return 0; if (hi <= k) return r - l; return ln->countSmaller(b[l], b[r], k) + rn->countSmaller(l-b[l], r-b[r], k); } - - ~WaveletTree() {delete ln; delete rn;} }; diff --git a/content/geometry/antipodalPoints.cpp b/content/geometry/antipodalPoints.cpp index 110cc74..b34b175 100644 --- a/content/geometry/antipodalPoints.cpp +++ b/content/geometry/antipodalPoints.cpp @@ -1,12 +1,12 @@ vector<pair<int, int>> antipodalPoints(vector<pt>& h) { - if (sz(h) < 2) return {}; + if (ssize(h) < 2) return {}; vector<pair<int, int>> result; for (int i = 0, j = 1; i < j; i++) { while (true) { result.push_back({i, j}); - if (cross(h[(i + 1) % sz(h)] - h[i], - h[(j + 1) % sz(h)] - h[j]) <= 0) break; - j = (j + 1) % sz(h); + if (cross(h[(i + 1) % ssize(h)] - h[i], + h[(j + 1) % ssize(h)] - h[j]) <= 0) break; + j = (j + 1) % ssize(h); }} return result; } diff --git a/content/geometry/circle.cpp b/content/geometry/circle.cpp index 6789c52..155b55c 100644 --- a/content/geometry/circle.cpp +++ b/content/geometry/circle.cpp @@ -22,7 +22,7 @@ vector<pt> circleRayIntersection(pt center, double r, double c = norm(orig - center) - r * r; double discr = b * b - 4 * a * c; if (discr >= 0) { - //t in [0, 1] => schnitt mit Segment [orig, orig + dir] + //t in [0, 1] => Schnitt mit Segment [orig, orig + dir] double t1 = -(b + sqrt(discr)) / (2 * a); double t2 = -(b - sqrt(discr)) / (2 * a); if (t1 >= 0) result.push_back(t1 * dir + orig); diff --git a/content/geometry/closestPair.cpp b/content/geometry/closestPair.cpp index 9b115f3..bbefa67 100644 --- a/content/geometry/closestPair.cpp +++ b/content/geometry/closestPair.cpp @@ -4,12 +4,11 @@ ll rec(vector<pt>::iterator a, int l, int r) { ll midx = a[m].real(); ll ans = min(rec(a, l, m), rec(a, 
m, r)); - inplace_merge(a+l, a+m, a+r, [](const pt& x, const pt& y) { - return x.imag() < y.imag(); - }); + ranges::inplace_merge(a+l, a+m, a+r, {}, + [](pt x) { return imag(x); }); pt tmp[8]; - fill(all(tmp), a[l]); + ranges::fill(tmp, a[l]); for (int i = l + 1, next = 0; i < r; i++) { if (ll x = a[i].real() - midx; x * x < ans) { for (pt& p : tmp) ans = min(ans, norm(p - a[i])); @@ -19,9 +18,7 @@ ll rec(vector<pt>::iterator a, int l, int r) { return ans; } -ll shortestDist(vector<pt> a) { // sz(pts) > 1 - sort(all(a), [](const pt& x, const pt& y) { - return x.real() < y.real(); - }); - return rec(a.begin(), 0, sz(a)); +ll shortestDist(vector<pt> a) { // size(pts) > 1 + ranges::sort(a, {}, [](pt x) { return real(x); }); + return rec(a.begin(), 0, ssize(a)); } diff --git a/content/geometry/convexHull.cpp b/content/geometry/convexHull.cpp index 1173924..03c6343 100644 --- a/content/geometry/convexHull.cpp +++ b/content/geometry/convexHull.cpp @@ -1,18 +1,16 @@ vector<pt> convexHull(vector<pt> pts){ - sort(all(pts), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - pts.erase(unique(all(pts)), pts.end()); + ranges::sort(pts, {}, + [](pt x) { return pair{real(x), imag(x)}; }); + pts.erase(begin(ranges::unique(pts)), end(pts)); int k = 0; - vector<pt> h(2 * sz(pts)); - auto half = [&](auto begin, auto end, int t) { - for (auto it = begin; it != end; it++) { - while (k > t && cross(h[k-2], h[k-1], *it) <= 0) k--; - h[k++] = *it; + vector<pt> h(2 * ssize(pts)); + auto half = [&](auto &&v, int t) { + for (auto x: v) { + while (k > t && cross(h[k-2], h[k-1], x) <= 0) k--; + h[k++] = x; }}; - half(all(pts), 1); // Untere Hülle. - half(next(pts.rbegin()), pts.rend(), k); // Obere Hülle. + half(pts, 1); // Untere Hülle. 
+ half(pts | views::reverse | views::drop(1), k); // Obere Hülle h.resize(k); return h; } diff --git a/content/geometry/delaunay.cpp b/content/geometry/delaunay.cpp index c813892..9ae9061 100644 --- a/content/geometry/delaunay.cpp +++ b/content/geometry/delaunay.cpp @@ -3,7 +3,8 @@ using pt = complex<lll>; constexpr pt INF_PT = pt(2e18, 2e18); -bool circ(pt p, pt a, pt b, pt c) {// p in circle(A,B,C), ABC must be ccw +// p in circle(A,B,C), ABC must be ccw +bool circ(pt p, pt a, pt b, pt c) { return imag((c-b)*conj(p-c)*(a-p)*conj(b-a)) < 0; } @@ -12,10 +13,10 @@ struct QuadEdge { QuadEdge* onext = nullptr; pt orig = INF_PT; bool used = false; - QuadEdge* rev() const {return rot->rot;} - QuadEdge* lnext() const {return rot->rev()->onext->rot;} - QuadEdge* oprev() const {return rot->onext->rot;} - pt dest() const {return rev()->orig;} + QuadEdge* rev() const { return rot->rot; } + QuadEdge* lnext() const { return rot->rev()->onext->rot; } + QuadEdge* oprev() const { return rot->onext->rot; } + pt dest() const { return rev()->orig; } }; deque<QuadEdge> edgeData; @@ -98,12 +99,10 @@ pair<QuadEdge*, QuadEdge*> rec(IT l, IT r) { } vector<pt> delaunay(vector<pt> pts) { - if (sz(pts) <= 2) return {}; - sort(all(pts), [](const pt& a, const pt& b) { - if (real(a) != real(b)) return real(a) < real(b); - return imag(a) < imag(b); - }); - QuadEdge* r = rec(all(pts)).first; + if (ssize(pts) <= 2) return {}; + ranges::sort(pts, {}, + [](pt x) { return pair{real(x), imag(x)}; }); + QuadEdge* r = rec(begin(pts), end(pts)).first; vector<QuadEdge*> edges = {r}; while (cross(r->onext->dest(), r->dest(), r->orig) < 0) r = r->onext; auto add = [&](QuadEdge* e){ @@ -117,7 +116,7 @@ vector<pt> delaunay(vector<pt> pts) { }; add(r); pts.clear(); - for (int i = 0; i < sz(edges); i++) { + for (int i = 0; i < ssize(edges); i++) { if (!edges[i]->used) add(edges[i]); } return pts; diff --git a/content/geometry/formulas.cpp b/content/geometry/formulas.cpp index 5d4e10d..b339451 100644 --- a/content/geometry/formulas.cpp +++ b/content/geometry/formulas.cpp @@ -6,20 +6,17 @@ constexpr double PIU = acos(-1.0l); // PIL < PI < PIU constexpr double PIL = PIU-2e-19l; // Winkel zwischen Punkt und x-Achse in [-PI, PI]. -double angle(pt a) {return arg(a);} +double angle(pt a) { return arg(a); } // rotiert Punkt im Uhrzeigersinn um den Ursprung. -pt rotate(pt a, double theta) {return a * polar(1.0, theta);} +pt rotate(pt a, double theta) { return a * polar(1.0, theta); } // Skalarprodukt. -auto dot(pt a, pt b) {return real(conj(a) * b);} - -// abs()^2.(pre c++20) -auto norm(pt a) {return dot(a, a);} +auto dot(pt a, pt b) { return real(conj(a) * b); } // Kreuzprodukt, 0, falls kollinear. 
-auto cross(pt a, pt b) {return imag(conj(a) * b);} -auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);} +auto cross(pt a, pt b) { return imag(conj(a) * b); } +auto cross(pt p, pt a, pt b) { return cross(a - p, b - p); } // 1 => c links von a->b // 0 => a, b und c kolliniear diff --git a/content/geometry/formulas3d.cpp b/content/geometry/formulas3d.cpp index 63de2ce..66a4644 100644 --- a/content/geometry/formulas3d.cpp +++ b/content/geometry/formulas3d.cpp @@ -2,20 +2,20 @@ auto operator|(pt3 a, pt3 b) { return a.x * b.x + a.y*b.y + a.z*b.z; } -auto dot(pt3 a, pt3 b) {return a|b;} +auto dot(pt3 a, pt3 b) { return a|b; } // Kreuzprodukt -pt3 operator*(pt3 a, pt3 b) {return {a.y*b.z - a.z*b.y, - a.z*b.x - a.x*b.z, - a.x*b.y - a.y*b.x};} -pt3 cross(pt3 a, pt3 b) {return a*b;} +pt3 operator*(pt3 a, pt3 b) { return {a.y*b.z - a.z*b.y, + a.z*b.x - a.x*b.z, + a.x*b.y - a.y*b.x}; } +pt3 cross(pt3 a, pt3 b) { return a*b; } // Länge von a -double abs(pt3 a) {return sqrt(dot(a, a));} -double abs(pt3 a, pt3 b) {return abs(b - a);} +double abs(pt3 a) { return sqrt(dot(a, a)); } +double abs(pt3 a, pt3 b) { return abs(b - a); } // Mixedprodukt -auto mixed(pt3 a, pt3 b, pt3 c) {return a*b|c;}; +auto mixed(pt3 a, pt3 b, pt3 c) { return a*b|c; } // orientierung von p zu der Ebene durch a, b, c // -1 => gegen den Uhrzeigersinn, diff --git a/content/geometry/geometry.tex b/content/geometry/geometry.tex index 92285c4..9290de4 100644 --- a/content/geometry/geometry.tex +++ b/content/geometry/geometry.tex @@ -7,7 +7,7 @@ \sourcecode{geometry/closestPair.cpp} \end{algorithm} -\begin{algorithm}{Konvexehülle} +\begin{algorithm}{Konvexe Hülle} \begin{methods} \method{convexHull}{berechnet konvexe Hülle}{n\*\log(n)} \end{methods} @@ -18,6 +18,7 @@ \end{itemize} \sourcecode{geometry/convexHull.cpp} \end{algorithm} +\columnbreak \begin{algorithm}{Rotating calipers} \begin{methods} @@ -29,6 +30,7 @@ \subsection{Formeln~~--~\texttt{std::complex}} \sourcecode{geometry/formulas.cpp} +\columnbreak \sourcecode{geometry/linesAndSegments.cpp} \sourcecode{geometry/sortAround.cpp} \input{geometry/triangle} @@ -40,7 +42,7 @@ \sourcecode{geometry/formulas3d.cpp} \optional{ - \subsection{3D-Kugeln} + \subsection{3D-Kugeln \opthint} \sourcecode{geometry/spheres.cpp} } @@ -48,15 +50,22 @@ \sourcecode{geometry/hpi.cpp} \end{algorithm} +\begin{algorithm}[optional]{Intersecting Segments} + \begin{methods} + \method{intersect}{finds ids of intersecting segments}{n\*\log(n)} + \end{methods} + \sourcecode{geometry/segmentIntersection.cpp} +\end{algorithm} + \begin{algorithm}[optional]{Delaunay Triangulierung} \begin{methods} \method{delaunay}{berechnet Triangulierung}{n\*\log(n)} \end{methods} - \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Traingulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. + \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Triangulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. 
\sourcecode{geometry/delaunay.cpp} \end{algorithm} \optional{ -\subsection{Geraden} +\subsection{Geraden \opthint} \sourcecode{geometry/lines.cpp} } diff --git a/content/geometry/hpi.cpp b/content/geometry/hpi.cpp index 02c71e3..ec27254 100644 --- a/content/geometry/hpi.cpp +++ b/content/geometry/hpi.cpp @@ -1,6 +1,6 @@ constexpr ll INF = 0x1FFF'FFFF'FFFF'FFFF; //THIS CODE IS WIP -bool left(pt p) {return real(p) < 0 || +bool left(pt p) {return real(p) < 0 || (real(p) == 0 && imag(p) < 0);} struct hp { pt from, to; @@ -11,7 +11,7 @@ struct hp { bool dummy() const {return from == to;} pt dir() const {return dummy() ? to : to - from;} bool operator<(const hp& o) const { - if (left(dir()) != left(o.dir())) + if (left(dir()) != left(o.dir())) return left(dir()) > left(o.dir()); return cross(dir(), o.dir()) > 0; } diff --git a/content/geometry/linesAndSegments.cpp b/content/geometry/linesAndSegments.cpp index ddab554..985ee24 100644 --- a/content/geometry/linesAndSegments.cpp +++ b/content/geometry/linesAndSegments.cpp @@ -28,9 +28,7 @@ pt projectToLine(pt a, pt b, pt p) { // sortiert alle Punkte pts auf einer Linie entsprechend dir void sortLine(pt dir, vector<pt>& pts) { // (2d und 3d) - sort(all(pts), [&](pt a, pt b){ - return dot(dir, a) < dot(dir, b); - }); + ranges::sort(pts, {}, [&](pt x) { return dot(dir, x); }); } // Liegt p auf der Strecke a-b? (nutze < für inberhalb) @@ -66,7 +64,7 @@ vector<pt> segmentIntersection2(pt a, pt b, pt c, pt d) { double x = cross(b - a, d - c); double y = cross(c - a, d - c); double z = cross(b - a, a - c); - if (x < 0) {x = -x; y = -y; z = -z;} + if (x < 0) { x = -x; y = -y; z = -z; } if (y < -EPS || y-x > EPS || z < -EPS || z-x > EPS) return {}; if (x > EPS) return {a + y/x*(b - a)}; vector<pt> result; diff --git a/content/geometry/polygon.cpp b/content/geometry/polygon.cpp index 11ae2f7..ee45539 100644 --- a/content/geometry/polygon.cpp +++ b/content/geometry/polygon.cpp @@ -2,7 +2,7 @@ // Punkte gegen den Uhrzeigersinn: positiv, sonst negativ. double area(const vector<pt>& poly) { //poly[0] == poly.back() ll res = 0; - for (int i = 0; i + 1 < sz(poly); i++) + for (int i = 0; i + 1 < ssize(poly); i++) res += cross(poly[i], poly[i + 1]); return 0.5 * res; } @@ -13,7 +13,7 @@ double area(const vector<pt>& poly) { //poly[0] == poly.back() // selbstschneidenden Polygonen (definitions Sache) ll windingNumber(pt p, const vector<pt>& poly) { ll res = 0; - for (int i = 0; i + 1 < sz(poly); i++) { + for (int i = 0; i + 1 < ssize(poly); i++) { pt a = poly[i], b = poly[i + 1]; if (real(a) > real(b)) swap(a, b); if (real(a) <= real(p) && real(p) < real(b) && @@ -26,7 +26,7 @@ ll windingNumber(pt p, const vector<pt>& poly) { // check if p is inside poly (any polygon poly[0] == poly.back()) bool inside(pt p, const vector<pt>& poly) { bool in = false; - for (int i = 0; i + 1 < sz(poly); i++) { + for (int i = 0; i + 1 < ssize(poly); i++) { pt a = poly[i], b = poly[i + 1]; if (pointOnSegment(a, b, p)) return false; // border counts? 
if (real(a) > real(b)) swap(a, b); @@ -40,7 +40,7 @@ bool inside(pt p, const vector<pt>& poly) { // convex hull without duplicates, h[0] != h.back() // apply comments if border counts as inside bool insideConvex(pt p, const vector<pt>& hull) { - int l = 0, r = sz(hull) - 1; + int l = 0, r = ssize(hull) - 1; if (cross(hull[0], hull[r], p) >= 0) return false; // > 0 while (l + 1 < r) { int m = (l + r) / 2; @@ -51,11 +51,9 @@ bool insideConvex(pt p, const vector<pt>& hull) { } void rotateMin(vector<pt>& hull) { - auto mi = min_element(all(hull), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - rotate(hull.begin(), mi, hull.end()); + auto mi = ranges::min_element(hull, {}, + [](pt a) { return pair{real(a), imag(a)}; }); + ranges::rotate(hull, mi); } // convex hulls without duplicates, h[0] != h.back() @@ -67,7 +65,7 @@ vector<pt> minkowski(vector<pt> ps, vector<pt> qs) { ps.push_back(ps[1]); qs.push_back(qs[1]); vector<pt> res; - for (ll i = 0, j = 0; i + 2 < sz(ps) || j + 2 < sz(qs);) { + for (ll i = 0, j = 0; i+2 < ssize(ps) || j+2 < ssize(qs);) { res.push_back(ps[i] + qs[j]); auto c = cross(ps[i + 1] - ps[i], qs[j + 1] - qs[j]); if(c >= 0) i++; @@ -83,22 +81,22 @@ double dist(const vector<pt>& ps, vector<pt> qs) { p.push_back(p[0]); double res = INF; bool intersect = true; - for (ll i = 0; i + 1 < sz(p); i++) { + for (ll i = 0; i + 1 < ssize(p); i++) { intersect &= cross(p[i], p[i+1]) >= 0; res = min(res, distToSegment(p[i], p[i+1], 0)); } return intersect ? 0 : res; } -bool left(pt of, pt p) {return cross(p, of) < 0 || - (cross(p, of) == 0 && dot(p, of) > 0);} +bool left(pt of, pt p) { return cross(p, of) < 0 || + (cross(p, of) == 0 && dot(p, of) > 0); } // convex hulls without duplicates, hull[0] == hull.back() and // hull[0] must be a convex point (with angle < pi) // returns index of corner where dot(dir, corner) is maximized int extremal(const vector<pt>& hull, pt dir) { dir *= pt(0, 1); - int l = 0, r = sz(hull) - 1; + int l = 0, r = ssize(hull) - 1; while (l + 1 < r) { int m = (l + r) / 2; pt dm = hull[m+1]-hull[m]; @@ -110,7 +108,7 @@ int extremal(const vector<pt>& hull, pt dir) { if (cross(dir, dm) < 0) l = m; else r = m; }} - return r % (sz(hull) - 1); + return r % (ssize(hull) - 1); } // convex hulls without duplicates, hull[0] == hull.back() and @@ -126,7 +124,7 @@ vector<int> intersectLine(const vector<pt>& hull, pt a, pt b) { if (cross(hull[endA], a, b) > 0 || cross(hull[endB], a, b) < 0) return {}; - int n = sz(hull) - 1; + int n = ssize(hull) - 1; vector<int> res; for (auto _ : {0, 1}) { int l = endA, r = endB; diff --git a/content/geometry/segmentIntersection.cpp b/content/geometry/segmentIntersection.cpp index afc01b2..9fdbdb8 100644 --- a/content/geometry/segmentIntersection.cpp +++ b/content/geometry/segmentIntersection.cpp @@ -39,10 +39,10 @@ pair<int, int> intersect(vector<seg>& segs) { events.push_back({s.a, s.id, 1}); events.push_back({s.b, s.id, -1}); } - sort(all(events)); + ranges::sort(events, less{}); set<seg> q; - vector<set<seg>::iterator> where(sz(segs)); + vector<set<seg>::iterator> where(ssize(segs)); for (auto e : events) { int id = e.id; if (e.type > 0) { diff --git a/content/geometry/sortAround.cpp b/content/geometry/sortAround.cpp index 98d17a8..7e9d1de 100644 --- a/content/geometry/sortAround.cpp +++ b/content/geometry/sortAround.cpp @@ -1,11 +1,11 @@ -bool left(pt p) {return real(p) < 0 ||
- (real(p) == 0 && imag(p) < 0);}
-
-// counter clockwise, starting with "11:59"
-void sortAround(pt p, vector<pt>& ps) {
- sort(all(ps), [&](const pt& a, const pt& b){
- if (left(a - p) != left(b - p))
- return left(a - p) > left(b - p);
- return cross(p, a, b) > 0;
- });
-}
+bool left(pt p) { return real(p) < 0 || + (real(p) == 0 && imag(p) < 0); } + +// counter clockwise, starting with "11:59" +void sortAround(pt p, vector<pt>& ps) { + ranges::sort(ps, [&](const pt& a, const pt& b){ + if (left(a - p) != left(b - p)) + return left(a - p) > left(b - p); + return cross(p, a, b) > 0; + }); +} diff --git a/content/geometry/triangle.cpp b/content/geometry/triangle.cpp index 534bb10..eab17f4 100644 --- a/content/geometry/triangle.cpp +++ b/content/geometry/triangle.cpp @@ -1,5 +1,5 @@ // Mittelpunkt des Dreiecks abc. -pt centroid(pt a, pt b, pt c) {return (a + b + c) / 3.0;} +pt centroid(pt a, pt b, pt c) { return (a + b + c) / 3.0; } // Flächeninhalt eines Dreicks bei bekannten Eckpunkten. double area(pt a, pt b, pt c) { @@ -30,7 +30,7 @@ pt circumCenter(pt a, pt b, pt c) { // -1 => p außerhalb Kreis durch a,b,c // 0 => p auf Kreis durch a,b,c // 1 => p im Kreis durch a,b,c -int insideOutCenter(pt a, pt b, pt c, pt p) {// braucht lll +int insideOutCenter(pt a, pt b, pt c, pt p) { // braucht lll return ccw(a,b,c) * sgn(imag((c-b)*conj(p-c)*(a-p)*conj(b-a))); } diff --git a/content/graph/2sat.cpp b/content/graph/2sat.cpp index 3e0811f..d4c8b7b 100644 --- a/content/graph/2sat.cpp +++ b/content/graph/2sat.cpp @@ -1,28 +1,28 @@ -constexpr int var(int i) {return i << 1;} // use this! -struct sat2 { - int n; // + scc variablen +constexpr int var(int i) { return i << 1; } // use this! +struct SAT2 { + int n; + vector<vector<int>> adj; vector<int> sol; - sat2(int vars) : n(vars*2), adj(n) {} + SAT2(int vars) : n(vars*2), adj(n) {} void addImpl(int a, int b) { adj[a].push_back(b); adj[1^b].push_back(1^a); } - void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);} - void addOr(int a, int b) {addImpl(1^a, b);} - void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);} - void addTrue(int a) {addImpl(1^a, a);} - void addFalse(int a) {addTrue(1^a);} - void addAnd(int a, int b) {addTrue(a); addTrue(b);} - void addNand(int a, int b) {addOr(1^a, 1^b);} + void addEquiv(int a, int b) { addImpl(a, b); addImpl(b, a); } + void addOr(int a, int b) { addImpl(1^a, b); } + void addXor(int a, int b) { addEquiv(a, 1^b); } + void addTrue(int a) { addImpl(1^a, a); } + void addFalse(int a) { addTrue(1^a); } + void addAnd(int a, int b) { addTrue(a); addTrue(b); } + void addNand(int a, int b) { addOr(1^a, 1^b); } bool solve() { - scc(); //scc code von oben + SCC scc(adj); // SCC @\sourceref{graph/scc.cpp}@ sol.assign(n, -1); - for (int i = 0; i < n; i += 2) { - if (idx[i] == idx[i + 1]) return false; - sol[i] = idx[i] < idx[i + 1]; - sol[i + 1] = !sol[i]; + for (int i = 0; i < n; i++) { + if (scc.idx[i] == scc.idx[1^i]) return false; + sol[i] = scc.idx[i] < scc.idx[1^i]; } return true; } diff --git a/content/graph/LCA_sparse.cpp b/content/graph/LCA_sparse.cpp index 221b5ed..1da8876 100644 --- a/content/graph/LCA_sparse.cpp +++ b/content/graph/LCA_sparse.cpp @@ -5,12 +5,12 @@ struct LCA { SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@ void init(vector<vector<int>>& adj, int root) { - depth.assign(2 * sz(adj), 0); - visited.assign(2 * sz(adj), -1); - first.assign(sz(adj), 2 * sz(adj)); + depth.assign(2 * ssize(adj), 0); + visited.assign(2 * ssize(adj), -1); + first.assign(ssize(adj), 2 * ssize(adj)); idx = 0; dfs(adj, root); - st.init(&depth); + st.init(depth); } void dfs(vector<vector<int>>& adj, int v, ll d=0) { @@ -18,15 +18,15 @@ struct LCA { first[v] = min(idx, first[v]), idx++; for (int u : adj[v]) { - if (first[u] == 2 * sz(adj)) { + if (first[u] == 2 * 
ssize(adj)) { dfs(adj, u, d + 1); visited[idx] = v, depth[idx] = d, idx++; }}} int getLCA(int u, int v) { if (first[u] > first[v]) swap(u, v); - return visited[st.queryIdempotent(first[u], first[v] + 1)]; + return visited[st.query(first[u], first[v] + 1)]; } - ll getDepth(int v) {return depth[first[v]];} + ll getDepth(int v) { return depth[first[v]]; } }; diff --git a/content/graph/TSP.cpp b/content/graph/TSP.cpp index 6223858..4d2479c 100644 --- a/content/graph/TSP.cpp +++ b/content/graph/TSP.cpp @@ -1,7 +1,7 @@ vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten. auto TSP() { - int n = sz(dist), m = 1 << n; + int n = ssize(dist), m = 1 << n; vector<vector<edge>> dp(n, vector<edge>(m, edge{INF, -1})); for (int c = 0; c < n; c++) @@ -21,7 +21,7 @@ auto TSP() { vector<int> tour = {0}; int v = 0; - while (tour.back() != 0 || sz(tour) == 1) + while (tour.back() != 0 || ssize(tour) == 1) tour.push_back(dp[tour.back()] [(v |= (1 << tour.back()))].to); // Enthält Knoten 0 zweimal. An erster und letzter Position. diff --git a/content/graph/articulationPoints.cpp b/content/graph/articulationPoints.cpp index 25ff67e..60970e6 100644 --- a/content/graph/articulationPoints.cpp +++ b/content/graph/articulationPoints.cpp @@ -14,14 +14,14 @@ int dfs(int v, int from = -1) { if (num[e.to] < me) st.push_back(e); } else { if (v == root) rootCount++; - int si = sz(st); + int si = ssize(st); int up = dfs(e.to, e.id); top = min(top, up); if (up >= me) isArt[v] = true; if (up > me) bridges.push_back(e); if (up <= me) st.push_back(e); if (up == me) { - bcc.emplace_back(si + all(st)); + bcc.emplace_back(begin(st) + si, end(st)); st.resize(si); }}} return top; @@ -29,12 +29,12 @@ int dfs(int v, int from = -1) { void find() { counter = 0; - num.assign(sz(adj), 0); - isArt.assign(sz(adj), false); + num.assign(ssize(adj), 0); + isArt.assign(ssize(adj), false); bridges.clear(); st.clear(); bcc.clear(); - for (int v = 0; v < sz(adj); v++) { + for (int v = 0; v < ssize(adj); v++) { if (!num[v]) { root = v; rootCount = 0; diff --git a/content/graph/binary_lifting.cpp b/content/graph/binary_lifting.cpp new file mode 100644 index 0000000..5ed6c07 --- /dev/null +++ b/content/graph/binary_lifting.cpp @@ -0,0 +1,28 @@ +struct Lift { + vector<int> dep, par, jmp; + + Lift(vector<vector<int>> &adj, int root): + dep(adj.size()), par(adj.size()), jmp(adj.size(), root) { + auto dfs = [&](auto &&self, int u, int p, int d) -> void { + dep[u] = d, par[u] = p; + jmp[u] = dep[p] + dep[jmp[jmp[p]]] == 2*dep[jmp[p]] + ? jmp[jmp[p]] : p; + for (int v: adj[u]) if (v != p) self(self, v, u, d+1); + }; + dfs(dfs, root, root, 0); + } + + int depth(int v) { return dep[v]; } + int lift(int v, int d) { + while (dep[v] > d) v = dep[jmp[v]] < d ? par[v] : jmp[v]; + return v; + } + int lca(int u, int v) { + v = lift(v, dep[u]), u = lift(u, dep[v]); + while (u != v) { + auto &a = jmp[u] == jmp[v] ? par : jmp; + u = a[u], v = a[v]; + } + return u; + } +}; diff --git a/content/graph/bitonicTSP.cpp b/content/graph/bitonicTSP.cpp index f025bca..b42f089 100644 --- a/content/graph/bitonicTSP.cpp +++ b/content/graph/bitonicTSP.cpp @@ -1,10 +1,10 @@ vector<vector<double>> dist; // Initialisiere mit Entfernungen zwischen Punkten. -auto bitonicTSP() { // n >= 2! 
- vector<double> dp(sz(dist), HUGE_VAL); - vector<int> pre(sz(dist)); // nur für Tour +auto bitonicTSP() { // n >= 2 + vector<double> dp(ssize(dist), HUGE_VAL); + vector<int> pre(ssize(dist)); // nur für Tour dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0; - for (unsigned int i = 2; i < sz(dist); i++) { + for (unsigned int i = 2; i < ssize(dist); i++) { double link = 0; for (int j = i - 2; j >= 0; j--) { link += dist[j + 1][j + 2]; @@ -13,9 +13,9 @@ auto bitonicTSP() { // n >= 2! dp[i] = opt; pre[i] = j; }}} - // return dp.back(); // Länger der Tour + // return dp.back(); // Länge der Tour - int j, n = sz(dist) - 1; + int j, n = ssize(dist) - 1; vector<int> ut, lt = {n, n - 1}; do { j = pre[n]; @@ -25,7 +25,7 @@ auto bitonicTSP() { // n >= 2! } } while(n = j + 1, j > 0); (lt.back() == 1 ? lt : ut).push_back(0); - reverse(all(lt)); - lt.insert(lt.end(), all(ut)); + ranges::reverse(lt); + lt.insert(end(lt), begin(ut), end(ut)); return lt; // Enthält Knoten 0 zweimal. An erster und letzter Position. } diff --git a/content/graph/bitonicTSPsimple.cpp b/content/graph/bitonicTSPsimple.cpp index cacfb9c..b6d72d8 100644 --- a/content/graph/bitonicTSPsimple.cpp +++ b/content/graph/bitonicTSPsimple.cpp @@ -3,7 +3,7 @@ vector<vector<double>> dp; double get(int p1, int p2) { int v = max(p1, p2) + 1; - if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; + if (v == ssize(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; if (dp[p1][p2] >= 0.0) return dp[p1][p2]; double tryLR = dist[p1][v] + get(v, p2); double tryRL = dist[p2][v] + get(p1, v); @@ -11,17 +11,19 @@ double get(int p1, int p2) { } auto bitonicTSP() { - dp = vector<vector<double>>(sz(dist), - vector<double>(sz(dist), -1)); + dp = vector<vector<double>>(ssize(dist), + vector<double>(ssize(dist), -1)); get(0, 0); - // return dp[0][0]; // Länger der Tour + // return dp[0][0]; // Länge der Tour vector<int> lr = {0}, rl = {0}; - for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) { + for (int p1 = 0, p2 = 0, v; + (v = max(p1, p2)+1) < ssize(dist);) { if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { lr.push_back(v); p1 = v; } else { rl.push_back(v); p2 = v; }} lr.insert(lr.end(), rl.rbegin(), rl.rend()); - return lr; // Enthält Knoten 0 zweimal. An erster und letzter Position. + // Enthält Knoten 0 zweimal. An erster und letzter Position. 
+ return lr; } diff --git a/content/graph/blossom.cpp b/content/graph/blossom.cpp index 7bd494a..3c9bd31 100644 --- a/content/graph/blossom.cpp +++ b/content/graph/blossom.cpp @@ -32,7 +32,7 @@ struct GM { auto h = label[r] = label[s] = {~x, y}; int join; while (true) { - if (s != sz(adj)) swap(r, s); + if (s != ssize(adj)) swap(r, s); r = findFirst(label[pairs[r]].first); if (label[r] == h) { join = r; @@ -48,13 +48,13 @@ struct GM { }}} bool augment(int v) { - label[v] = {sz(adj), -1}; - first[v] = sz(adj); + label[v] = {ssize(adj), -1}; + first[v] = ssize(adj); head = tail = 0; for (que[tail++] = v; head < tail;) { int x = que[head++]; for (int y : adj[x]) { - if (pairs[y] == sz(adj) && y != v) { + if (pairs[y] == ssize(adj) && y != v) { pairs[y] = x; rematch(x, y); return true; @@ -70,12 +70,12 @@ struct GM { int match() { int matching = head = tail = 0; - for (int v = 0; v < sz(adj); v++) { - if (pairs[v] < sz(adj) || !augment(v)) continue; + for (int v = 0; v < ssize(adj); v++) { + if (pairs[v] < ssize(adj) || !augment(v)) continue; matching++; for (int i = 0; i < tail; i++) label[que[i]] = label[pairs[que[i]]] = {-1, -1}; - label[sz(adj)] = {-1, -1}; + label[ssize(adj)] = {-1, -1}; } return matching; } diff --git a/content/graph/bronKerbosch.cpp b/content/graph/bronKerbosch.cpp index 9f7d8c5..144707a 100644 --- a/content/graph/bronKerbosch.cpp +++ b/content/graph/bronKerbosch.cpp @@ -11,7 +11,7 @@ void bronKerboschRec(bits R, bits P, bits X) { } else { int q = (P | X)._Find_first(); bits cands = P & ~adj[q]; - for (int i = 0; i < sz(adj); i++) if (cands[i]) { + for (int i = 0; i < ssize(adj); i++) if (cands[i]) { R[i] = 1; bronKerboschRec(R, P & adj[i], X & adj[i]); R[i] = P[i] = 0; @@ -20,5 +20,5 @@ void bronKerboschRec(bits R, bits P, bits X) { void bronKerbosch() { cliques.clear(); - bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {}); + bronKerboschRec({}, {(1ull << ssize(adj)) - 1}, {}); } diff --git a/content/graph/centroid.cpp b/content/graph/centroid.cpp index 820945b..3cd5519 100644 --- a/content/graph/centroid.cpp +++ b/content/graph/centroid.cpp @@ -15,7 +15,7 @@ pair<int, int> dfs_cent(int v, int from, int n) { } pair<int, int> find_centroid(int root = 0) { - s.resize(sz(adj)); + s.resize(ssize(adj)); dfs_sz(root); return dfs_cent(root, -1, s[root]); } diff --git a/content/graph/cycleCounting.cpp b/content/graph/cycleCounting.cpp index 6a299ee..b7545d5 100644 --- a/content/graph/cycleCounting.cpp +++ b/content/graph/cycleCounting.cpp @@ -9,8 +9,8 @@ struct cycles { cycles(int n) : adj(n), seen(n), paths(n) {} void addEdge(int u, int v) { - adj[u].push_back({v, sz(edges)}); - adj[v].push_back({u, sz(edges)}); + adj[u].push_back({v, ssize(edges)}); + adj[v].push_back({u, ssize(edges)}); edges.push_back({u, v}); } @@ -36,26 +36,24 @@ struct cycles { cur[id].flip(); }}} - bool isCycle(cycle cur) {//cycle must be constrcuted from base + bool isCycle(cycle cur) {// cycle must be constructed from base if (cur.none()) return false; - init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ - for (int i = 0; i < sz(edges); i++) { + UnionFind uf(ssize(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ + for (int i = 0; i < ssize(edges); i++) { if (cur[i]) { cur[i] = false; - if (findSet(edges[i].first) == - findSet(edges[i].second)) break; - unionSets(edges[i].first, edges[i].second); + if (!uf.link(edges[i].first, edges[i].second)) break; }} return cur.none(); } int count() { - for (int i = 0; i < sz(adj); i++) findBase(i); - assert(sz(base) < 30); + for 
(int i = 0; i < ssize(adj); i++) findBase(i); + assert(ssize(base) < 30); int res = 0; - for (int i = 1; i < (1 << sz(base)); i++) { + for (int i = 1; i < (1 << ssize(base)); i++) { cycle cur; - for (int j = 0; j < sz(base); j++) + for (int j = 0; j < ssize(base); j++) if (((i >> j) & 1) != 0) cur ^= base[j]; if (isCycle(cur)) res++; } diff --git a/content/graph/dijkstra.cpp b/content/graph/dijkstra.cpp index 61c636d..ab4bef9 100644 --- a/content/graph/dijkstra.cpp +++ b/content/graph/dijkstra.cpp @@ -1,21 +1,18 @@ -using path = pair<ll, int>; //dist, destination +using Dist = ll; -auto dijkstra(const vector<vector<path>>& adj, int start) { - priority_queue<path, vector<path>, greater<path>> pq; - vector<ll> dist(sz(adj), INF); - vector<int> prev(sz(adj), -1); - dist[start] = 0; pq.emplace(0, start); +auto dijkstra(vector<vector<pair<int, Dist>>> &adj, int start) { + priority_queue<pair<Dist, int>> pq; + vector<Dist> dist(ssize(adj), INF); + dist[start] = 0, pq.emplace(0, start); - while (!pq.empty()) { - auto [dv, v] = pq.top(); pq.pop(); - if (dv > dist[v]) continue; // WICHTIG! + while (!empty(pq)) { + auto [du, u] = pq.top(); + du = -du, pq.pop(); + if (du > dist[u]) continue; // WICHTIG! - for (auto [du, u] : adj[v]) { - ll newDist = dv + du; - if (newDist < dist[u]) { - dist[u] = newDist; - prev[u] = v; - pq.emplace(dist[u], u); - }}} - return dist; //return prev; + for (auto [v, d]: adj[u]) { + Dist dv = du + d; + if (dv < dist[v]) dist[v] = dv, pq.emplace(-dv, v); + }} + return dist; } diff --git a/content/graph/dinic.cpp b/content/graph/dinic.cpp deleted file mode 100644 index 2e58a2d..0000000 --- a/content/graph/dinic.cpp +++ /dev/null @@ -1,55 +0,0 @@ -struct Edge { - int to, rev; - ll f, c; -}; - -vector<vector<Edge>> adj; -int s, t; -vector<int> pt, dist; - -void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); -} - -bool bfs() { - dist.assign(sz(adj), -1); - dist[s] = 0; - queue<int> q({s}); - while (!q.empty() && dist[t] < 0) { - int v = q.front(); q.pop(); - for (Edge& e : adj[v]) { - if (dist[e.to] < 0 && e.c - e.f > 0) { - dist[e.to] = dist[v] + 1; - q.push(e.to); - }}} - return dist[t] >= 0; -} - -ll dfs(int v, ll flow = INF) { - if (v == t || flow == 0) return flow; - for (; pt[v] < sz(adj[v]); pt[v]++) { - Edge& e = adj[v][pt[v]]; - if (dist[e.to] != dist[v] + 1) continue; - ll cur = dfs(e.to, min(e.c - e.f, flow)); - if (cur > 0) { - e.f += cur; - adj[e.to][e.rev].f -= cur; - return cur; - }} - return 0; -} - -ll maxFlow(int source, int target) { - s = source, t = target; - ll flow = 0; - while (bfs()) { - pt.assign(sz(adj), 0); - ll cur; - do { - cur = dfs(s); - flow += cur; - } while (cur > 0); - } - return flow; -} diff --git a/content/graph/dinicScaling.cpp b/content/graph/dinitzScaling.cpp index b0828d0..c612924 100644 --- a/content/graph/dinicScaling.cpp +++ b/content/graph/dinitzScaling.cpp @@ -8,12 +8,12 @@ int s, t; vector<int> pt, dist; void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); + adj[u].push_back({v, (int)ssize(adj[v]), 0, c}); + adj[v].push_back({u, (int)ssize(adj[u]) - 1, 0, 0}); } bool bfs(ll lim) { - dist.assign(sz(adj), -1); + dist.assign(ssize(adj), -1); dist[s] = 0; queue<int> q({s}); while (!q.empty() && dist[t] < 0) { @@ -28,7 +28,7 @@ bool bfs(ll lim) { ll dfs(int v, ll flow) { if (v == t || flow == 0) return flow; - for (; pt[v] < sz(adj[v]); pt[v]++) { + for (; pt[v] < 
ssize(adj[v]); pt[v]++) { Edge& e = adj[v][pt[v]]; if (dist[e.to] != dist[v] + 1) continue; ll cur = dfs(e.to, min(e.c - e.f, flow)); @@ -43,10 +43,11 @@ ll dfs(int v, ll flow) { ll maxFlow(int source, int target) { s = source, t = target; ll flow = 0; - // set lim = 1 and use dfs(s, INF) to disable scaling + // If capacities are small, may want to disable scaling: + // Run bfs with lim = 1, and dfs with lim = INF. for (ll lim = (1LL << 62); lim >= 1; lim /= 2) { while (bfs(lim)) { - pt.assign(sz(adj), 0); + pt.assign(ssize(adj), 0); ll cur; do { cur = dfs(s, lim); diff --git a/content/graph/euler.cpp b/content/graph/euler.cpp index e81cebe..d45dac0 100644 --- a/content/graph/euler.cpp +++ b/content/graph/euler.cpp @@ -2,8 +2,8 @@ vector<vector<pair<int, int>>> adj; // gets destroyed! vector<int> cycle; void addEdge(int u, int v) { - adj[u].emplace_back(v, sz(adj[v])); - adj[v].emplace_back(u, sz(adj[u]) - 1); // remove for directed + adj[u].emplace_back(v, ssize(adj[v])); + adj[v].emplace_back(u, ssize(adj[u]) - 1); // remove for directed } void euler(int v) { diff --git a/content/graph/floydWarshall.cpp b/content/graph/floydWarshall.cpp index df096c2..1a1138d 100644 --- a/content/graph/floydWarshall.cpp +++ b/content/graph/floydWarshall.cpp @@ -2,16 +2,16 @@ vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten. vector<vector<int>> next; void floydWarshall() { - next.assign(sz(dist), vector<int>(sz(dist), -1)); - for (int i = 0; i < sz(dist); i++) { - for (int j = 0; j < sz(dist); j++) { + next.assign(ssize(dist), vector<int>(ssize(dist), -1)); + for (int i = 0; i < ssize(dist); i++) { + for (int j = 0; j < ssize(dist); j++) { if (dist[i][j] < INF) { next[i][j] = j; }}} - for (int k = 0; k < sz(dist); k++) { - for (int i = 0; i < sz(dist); i++) { - for (int j = 0; j < sz(dist); j++) { + for (int k = 0; k < ssize(dist); k++) { + for (int i = 0; i < ssize(dist); i++) { + for (int j = 0; j < ssize(dist); j++) { // only needed if dist can be negative if (dist[i][k] == INF || dist[k][j] == INF) continue; if (dist[i][j] > dist[i][k] + dist[k][j]) { diff --git a/content/graph/graph.tex b/content/graph/graph.tex index 7389ce6..f6f3d02 100644 --- a/content/graph/graph.tex +++ b/content/graph/graph.tex @@ -1,12 +1,5 @@ \section{Graphen} -\begin{algorithm}{Kruskal} - \begin{methods}[ll] - berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ - \end{methods} - \sourcecode{graph/kruskal.cpp} -\end{algorithm} - \begin{algorithm}{Minimale Spannbäume} \paragraph{Schnitteigenschaft} Für jeden Schnitt $C$ im Graphen gilt: @@ -16,6 +9,14 @@ \paragraph{Kreiseigenschaft} Für jeden Kreis $K$ im Graphen gilt: Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. 
+ + \optional{ + \subsubsection{\textsc{Kruskal}'s Algorithm \opthint} + \begin{methods}[ll] + berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ + \end{methods} + \sourcecode{graph/kruskal.cpp} + } \end{algorithm} \begin{algorithm}{Heavy-Light Decomposition} @@ -28,7 +29,7 @@ \sourcecode{graph/hld.cpp} \end{algorithm} -\begin{algorithm}{Lowest Common Ancestor} +\begin{algorithm}[optional]{Lowest Common Ancestor} \begin{methods} \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} \method{getLCA}{findet LCA}{1} @@ -37,6 +38,17 @@ \sourcecode{graph/LCA_sparse.cpp} \end{algorithm} +\begin{algorithm}{Binary Lifting} + % https://codeforces.com/blog/entry/74847 + \begin{methods} + \method{Lift}{constructor}{\abs{V}} + \method{depth}{distance to root of vertex $v$}{1} + \method{lift}{vertex above $v$ at depth $d$}{\log(\abs{V})} + \method{lca}{lowest common ancestor of $u$ and $v$}{\log(\abs{V})} + \end{methods} + \sourcecode{graph/binary_lifting.cpp} +\end{algorithm} + \begin{algorithm}{Centroids} \begin{methods} \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} @@ -128,12 +140,12 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \begin{algorithm}{Dynamic Connectivity} \begin{methods} \method{Constructor}{erzeugt Baum ($n$ Knoten, $m$ updates)}{n+m} - \method{addEdge}{fügt Kante ein,\code{id}=delete Zeitpunkt}{\log(n)} + \method{addEdge}{fügt Kante ein, \code{id} = delete-Zeitpunkt}{\log(n)} \method{eraseEdge}{entfernt Kante \code{id}}{\log(n)} \end{methods} \sourcecode{graph/connect.cpp} \end{algorithm} - +\columnbreak @@ -150,7 +162,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/bronKerbosch.cpp} \end{algorithm} -\begin{algorithm}{Maximum Cardinatlity Bipartite Matching} +\begin{algorithm}{Maximum Cardinality Bipartite Matching} \label{kuhn} \begin{methods} \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} @@ -158,7 +170,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \begin{itemize} \item die ersten [0..l) Knoten in \code{adj} sind die linke Seite des Graphen \end{itemize} - \sourcecode{graph/maxCarBiMatch.cpp} + \sourcecode{graph/kuhn.cpp} \columnbreak \begin{methods} \method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}} @@ -211,7 +223,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/virtualTree.cpp} \end{algorithm} -\begin{algorithm}{Erd\H{o}s-Gallai} +\begin{algorithm}{\textsc{Erd\H{o}s-Gallai}} Sei $d_1 \geq \cdots \geq d_{n}$. 
Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ \begin{methods} \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} @@ -222,7 +234,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \subsection{Max-Flow} \optional{ - \subsubsection{Push Relabel} + \subsubsection{Push Relabel \opthint} \begin{methods} \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} @@ -230,22 +242,21 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/pushRelabel.cpp} } +\subsubsection{\textsc{Dinitz}'s Algorithm mit Capacity Scaling} +\begin{methods} + \method{maxFlow}{doppelt so schnell wie \textsc{Ford-Fulkerson}}{\abs{V}^2\cdot\abs{E}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/dinitzScaling.cpp} + \begin{algorithm}{Min-Cost-Max-Flow} \begin{methods} \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} \end{methods} \sourcecode{graph/minCostMaxFlow.cpp} \end{algorithm} -\vfill\null \columnbreak -\subsubsection{Dinic's Algorithm mit Capacity Scaling} -\begin{methods} - \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} - \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} -\end{methods} -\sourcecode{graph/dinicScaling.cpp} - \optional{ \subsubsection{Anwendungen} \begin{itemize} diff --git a/content/graph/havelHakimi.cpp b/content/graph/havelHakimi.cpp index ac4d67d..9f4c081 100644 --- a/content/graph/havelHakimi.cpp +++ b/content/graph/havelHakimi.cpp @@ -1,12 +1,12 @@ vector<vector<int>> havelHakimi(const vector<int>& deg) { priority_queue<pair<int, int>> pq; - for (int i = 0; i < sz(deg); i++) { + for (int i = 0; i < ssize(deg); i++) { if (deg[i] > 0) pq.push({deg[i], i}); } - vector<vector<int>> adj(sz(deg)); + vector<vector<int>> adj(ssize(deg)); while (!pq.empty()) { auto [degV, v] = pq.top(); pq.pop(); - if (sz(pq) < degV) return {}; //impossible + if (ssize(pq) < degV) return {}; //impossible vector<pair<int, int>> todo(degV); for (auto& e : todo) e = pq.top(), pq.pop(); for (auto [degU, u] : todo) { diff --git a/content/graph/hld.cpp b/content/graph/hld.cpp index 65d3f5c..e365b13 100644 --- a/content/graph/hld.cpp +++ b/content/graph/hld.cpp @@ -21,7 +21,7 @@ void dfs_hld(int v = 0, int from = -1) { } void init(int root = 0) { - int n = sz(adj); + int n = ssize(adj); sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1); in.resize(n), out.resize(n); counter = 0; diff --git a/content/graph/hopcroftKarp.cpp b/content/graph/hopcroftKarp.cpp index c1f5d1c..d07bd3a 100644 --- a/content/graph/hopcroftKarp.cpp +++ b/content/graph/hopcroftKarp.cpp @@ -5,14 +5,14 @@ vector<int> pairs, dist, ptr; bool bfs(int l) { queue<int> q; for(int v = 0; v < l; v++) { - if (pairs[v] < 0) {dist[v] = 0; q.push(v);} + if (pairs[v] < 0) { dist[v] = 0; q.push(v); } else dist[v] = -1; } bool exist = false; while(!q.empty()) { int v = q.front(); q.pop(); for (int u : adj[v]) { - if (pairs[u] < 0) {exist = true; continue;} + if (pairs[u] < 0) { exist = true; continue; } if (dist[pairs[u]] < 0) { dist[pairs[u]] = dist[v] + 1; q.push(pairs[u]); @@ -21,7 +21,7 @@ bool bfs(int l) { } bool dfs(int v) { - for (; ptr[v] < sz(adj[v]); ptr[v]++) { + for (; ptr[v] < ssize(adj[v]); ptr[v]++) 
{ int u = adj[v][ptr[v]]; if (pairs[u] < 0 || (dist[pairs[u]] > dist[v] && dfs(pairs[u]))) { @@ -33,7 +33,7 @@ bool dfs(int v) { int hopcroft_karp(int l) { // l = #Knoten links int ans = 0; - pairs.assign(sz(adj), -1); + pairs.assign(ssize(adj), -1); dist.resize(l); // Greedy Matching, optionale Beschleunigung. for (int v = 0; v < l; v++) for (int u : adj[v]) diff --git a/content/graph/kruskal.cpp b/content/graph/kruskal.cpp index 987d30b..98a2682 100644 --- a/content/graph/kruskal.cpp +++ b/content/graph/kruskal.cpp @@ -1,9 +1,11 @@ -sort(all(edges)); -vector<Edge> mst; -ll cost = 0; -for (Edge& e : edges) { - if (findSet(e.from) != findSet(e.to)) { - unionSets(e.from, e.to); - mst.push_back(e); - cost += e.cost; -}} +ll kruskal(int n, vector<Edge> edges, vector<Edge> &mst) { + ranges::sort(edges, less{}); + ll cost = 0; + UnionFind uf(n); // union find @\sourceref{datastructures/unionFind.cpp}@ + for (Edge &e: edges) { + if (uf.link(e.from, e.to)) { + mst.push_back(e); + cost += e.cost; + }} + return cost; +} diff --git a/content/graph/maxCarBiMatch.cpp b/content/graph/kuhn.cpp index e928387..688c846 100644 --- a/content/graph/maxCarBiMatch.cpp +++ b/content/graph/kuhn.cpp @@ -12,7 +12,7 @@ bool dfs(int v) { } int kuhn(int l) { // l = #Knoten links. - pairs.assign(sz(adj), -1); + pairs.assign(ssize(adj), -1); int ans = 0; // Greedy Matching. Optionale Beschleunigung. for (int v = 0; v < l; v++) for (int u : adj[v]) diff --git a/content/graph/matching.cpp b/content/graph/matching.cpp index 1e450c0..f0f34a3 100644 --- a/content/graph/matching.cpp +++ b/content/graph/matching.cpp @@ -1,22 +1,17 @@ -constexpr int mod=1'000'000'007, I=10; -vector<vector<ll>> adj, mat; +constexpr int mod = 1'000'000'007, I = 10; -int max_matching() { +int max_matching(const vector<vector<int>> &adj) { int ans = 0; - mat.assign(sz(adj), {}); + vector<vector<ll>> mat(ssize(adj)); for (int _ = 0; _ < I; _++) { - for (int v = 0; v < sz(adj); v++) { - mat[v].assign(sz(adj), 0); + for (int v = 0; v < ssize(adj); v++) { + mat[v].assign(ssize(adj), 0); for (int u : adj[v]) { if (u < v) { mat[v][u] = rand() % (mod - 1) + 1; mat[u][v] = mod - mat[v][u]; }}} - gauss(sz(mat), sz(mat[0])); //LGS @\sourceref{math/lgsFp.cpp}@ - int rank = 0; - for (auto& row : mat) { - if (*max_element(all(row)) != 0) rank++; - } + int rank = ssize(gauss(mat)); // LGS @\sourceref{math/lgsFp.cpp}@ ans = max(ans, rank / 2); } return ans; diff --git a/content/graph/maxWeightBipartiteMatching.cpp b/content/graph/maxWeightBipartiteMatching.cpp index a2b0a80..b6f6ddf 100644 --- a/content/graph/maxWeightBipartiteMatching.cpp +++ b/content/graph/maxWeightBipartiteMatching.cpp @@ -45,6 +45,6 @@ double match(int l, int r) { yx[y] = aug[y]; swap(y, xy[aug[y]]); }} - return accumulate(all(lx), 0.0) + - accumulate(all(ly), 0.0); // Wert des Matchings + return accumulate(begin(lx), end(lx), 0.0) + + accumulate(begin(ly), end(ly), 0.0); // Wert des Matchings } diff --git a/content/graph/minCostMaxFlow.cpp b/content/graph/minCostMaxFlow.cpp index 14a222c..fde95f3 100644 --- a/content/graph/minCostMaxFlow.cpp +++ b/content/graph/minCostMaxFlow.cpp @@ -15,16 +15,16 @@ struct MinCostFlow { adj(n), s(source), t(target) {}; void addEdge(int u, int v, ll c, ll cost) { - adj[u].push_back(sz(edges)); + adj[u].push_back(ssize(edges)); edges.push_back({v, c, cost}); - adj[v].push_back(sz(edges)); + adj[v].push_back(ssize(edges)); edges.push_back({u, 0, -cost}); } bool SPFA() { - pref.assign(sz(adj), -1); - dist.assign(sz(adj), INF); - vector<bool> inqueue(sz(adj)); + 
pref.assign(ssize(adj), -1); + dist.assign(ssize(adj), INF); + vector<bool> inqueue(ssize(adj)); queue<int> queue; dist[s] = 0; queue.push(s); @@ -59,7 +59,7 @@ struct MinCostFlow { }} void mincostflow() { - con.assign(sz(adj), 0); + con.assign(ssize(adj), 0); maxflow = mincost = 0; while (SPFA()) extend(); } diff --git a/content/graph/pushRelabel.cpp b/content/graph/pushRelabel.cpp index ec36026..c569df2 100644 --- a/content/graph/pushRelabel.cpp +++ b/content/graph/pushRelabel.cpp @@ -9,8 +9,8 @@ vector<ll> ec; vector<int> cur, H; void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0}); + adj[u].push_back({v, (int)ssize(adj[v]), 0, c}); + adj[v].push_back({u, (int)ssize(adj[u])-1, 0, 0}); } void addFlow(Edge& e, ll f) { @@ -23,7 +23,7 @@ void addFlow(Edge& e, ll f) { } ll maxFlow(int s, int t) { - int n = sz(adj); + int n = ssize(adj); hs.assign(2*n, {}); ec.assign(n, 0); cur.assign(n, 0); @@ -38,9 +38,9 @@ ll maxFlow(int s, int t) { int v = hs[hi].back(); hs[hi].pop_back(); while (ec[v] > 0) { - if (cur[v] == sz(adj[v])) { + if (cur[v] == ssize(adj[v])) { H[v] = 2*n; - for (int i = 0; i < sz(adj[v]); i++) { + for (int i = 0; i < ssize(adj[v]); i++) { Edge& e = adj[v][i]; if (e.c - e.f > 0 && H[v] > H[e.to] + 1) { diff --git a/content/graph/reroot.cpp b/content/graph/reroot.cpp index 379c839..5a9c9d1 100644 --- a/content/graph/reroot.cpp +++ b/content/graph/reroot.cpp @@ -26,11 +26,11 @@ struct Reroot { pref.push_back(takeChild(v, u, w, dp[u])); } auto suf = pref; - partial_sum(all(pref), pref.begin(), comb); + partial_sum(begin(pref), end(pref), begin(pref), comb); exclusive_scan(suf.rbegin(), suf.rend(), suf.rbegin(), E, comb); - for (int i = 0; i < sz(adj[v]); i++) { + for (int i = 0; i < ssize(adj[v]); i++) { auto [u, w] = adj[v][i]; if (u == from) continue; dp[v] = fin(v, comb(pref[i], suf[i + 1])); @@ -40,7 +40,7 @@ struct Reroot { } auto solve() { - dp.assign(sz(adj), E); + dp.assign(ssize(adj), E); dfs0(0); dfs1(0); return dp; diff --git a/content/graph/scc.cpp b/content/graph/scc.cpp index a6af7d6..a9e10c1 100644 --- a/content/graph/scc.cpp +++ b/content/graph/scc.cpp @@ -1,27 +1,25 @@ -vector<vector<int>> adj; -int sccCounter; -vector<int> low, idx, s; //idx enthält Index der SCC pro Knoten. 
+struct SCC { + vector<int> idx; // idx enthält Index der SCC pro Knoten + vector<vector<int>> sccs; // Liste der Knoten pro SCC -void visit(int v) { - int old = low[v] = sz(s); - s.push_back(v); + SCC(const vector<vector<int>> &adj): idx(ssize(adj), -1) { + vector<int> low(ssize(adj), -1); + vector<int> s; + auto dfs = [&](auto &&self, int v) -> void { + int old = low[v] = ssize(s); + s.push_back(v); - for (auto u : adj[v]) { - if (low[u] < 0) visit(u); - if (idx[u] < 0) low[v] = min(low[v], low[u]); - } + for (auto u : adj[v]) { + if (low[u] < 0) self(self, u); + if (idx[u] < 0) low[v] = min(low[v], low[u]); + } - if (old == low[v]) { - for (int i = old; i < sz(s); i++) idx[s[i]] = sccCounter; - sccCounter++; - s.resize(old); -}} - -void scc() { - low.assign(sz(adj), -1); - idx.assign(sz(adj), -1); - - sccCounter = 0; - for (int i = 0; i < sz(adj); i++) { - if (low[i] < 0) visit(i); -}} + if (old == low[v]) { + sccs.emplace_back(begin(s) + old, end(s)); + for (int u: sccs.back()) idx[u] = ssize(sccs)-1; + s.resize(old); + }}; + for (int i = 0; i < ssize(adj); i++) { + if (low[i] < 0) dfs(dfs, i); + }} +}; diff --git a/content/graph/stoerWagner.cpp b/content/graph/stoerWagner.cpp index 97e667a..a122488 100644 --- a/content/graph/stoerWagner.cpp +++ b/content/graph/stoerWagner.cpp @@ -7,7 +7,7 @@ vector<vector<Edge>> adj, tmp; vector<bool> erased; void merge(int u, int v) { - tmp[u].insert(tmp[u].end(), all(tmp[v])); + tmp[u].insert(end(tmp[u]), begin(tmp[v]), end(tmp[v])); tmp[v].clear(); erased[v] = true; for (auto& vec : tmp) { @@ -19,33 +19,33 @@ void merge(int u, int v) { ll stoer_wagner() { ll res = INF; tmp = adj; - erased.assign(sz(tmp), false); - for (int i = 1; i < sz(tmp); i++) { + erased.assign(ssize(tmp), false); + for (int i = 1; i < ssize(tmp); i++) { int s = 0; while (erased[s]) s++; priority_queue<pair<ll, int>> pq; pq.push({0, s}); - vector<ll> con(sz(tmp)); + vector<ll> con(ssize(tmp)); ll cur = 0; vector<pair<ll, int>> state; while (!pq.empty()) { int c = pq.top().second; pq.pop(); - if (con[c] < 0) continue; //already seen + if (con[c] < 0) continue; // already seen con[c] = -1; for (auto e : tmp[c]) { - if (con[e.to] >= 0) {//add edge to cut + if (con[e.to] >= 0) { // add edge to cut con[e.to] += e.cap; pq.push({con[e.to], e.to}); cur += e.cap; - } else if (e.to != c) {//remove edge from cut + } else if (e.to != c) { // remove edge from cut cur -= e.cap; }} state.push_back({cur, c}); } int t = state.back().second; state.pop_back(); - if (state.empty()) return 0; //graph is not connected?! + if (state.empty()) return 0; // graph is not connected?! 
merge(state.back().second, t); res = min(res, state.back().first); } diff --git a/content/graph/treeIsomorphism.cpp b/content/graph/treeIsomorphism.cpp index 355fefb..8c2ca21 100644 --- a/content/graph/treeIsomorphism.cpp +++ b/content/graph/treeIsomorphism.cpp @@ -7,9 +7,9 @@ int treeLabel(int v, int from = -1) { if (u == from) continue; children.push_back(treeLabel(u, v)); } - sort(all(children)); + ranges::sort(children); if (known.find(children) == known.end()) { - known[children] = sz(known); + known[children] = ssize(known); } return known[children]; } diff --git a/content/graph/virtualTree.cpp b/content/graph/virtualTree.cpp index 6233b27..81ba001 100644 --- a/content/graph/virtualTree.cpp +++ b/content/graph/virtualTree.cpp @@ -2,14 +2,14 @@ vector<int> in, out; void virtualTree(vector<int> ind) { // indices of used nodes - sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); - for (int i = 1, n = sz(ind); i < n; i++) { + ranges::sort(ind, {}, [&](int x) { return in[x]; }); + for (int i = 1, n = ssize(ind); i < n; i++) { ind.push_back(lca(ind[i - 1], ind[i])); } - sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); - ind.erase(unique(all(ind)), ind.end()); + ranges::sort(ind, {}, [&](int x) { return in[x]; }); + ind.erase(begin(ranges::unique(ind)), end(ind)); - int n = sz(ind); + int n = ssize(ind); vector<vector<int>> tree(n); vector<int> st = {0}; for (int i = 1; i < n; i++) { diff --git a/content/latexHeaders/code.sty b/content/latexHeaders/code.sty index 3ebdda3..8a600c5 100644 --- a/content/latexHeaders/code.sty +++ b/content/latexHeaders/code.sty @@ -1,3 +1,6 @@ +\usepackage{ocgx2} +\usepackage{fontawesome} + % Colors, used for syntax highlighting. % To print this document, set all colors to black! \usepackage{xcolor} @@ -101,6 +104,32 @@ % \addtocounter{lstnumber}{-1}% %} +\ifthenelse{\isundefined{\srclink}}{}{ + \lst@AddToHook{Init}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[t][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source links}{srclinks}{1}% + \hfill\href{\srclink{\lst@name}}{\faExternalLink}% + \end{ocg}% + \end{minipage}% + }% + } +} + +\lst@AddToHook{DeInit}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[b][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source file names}{srcfiles}{0}% + \hfill\textcolor{gray}{\lst@name}% + \end{ocg}% + \end{minipage}% + }% +} + \newenvironment{btHighlight}[1][] {\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}} {\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup} diff --git a/content/latexHeaders/commands.sty b/content/latexHeaders/commands.sty index edbba1b..73a7dca 100644 --- a/content/latexHeaders/commands.sty +++ b/content/latexHeaders/commands.sty @@ -7,6 +7,11 @@ \newcommand{\code}[1]{\lstinline[breaklines=true]{#1}} \let\codeSafe\lstinline +\ifoptional + \renewcommand{\columnbreak}{} + \newcommand\opthint{\textcolor{gray}{(optional)}} +\fi + \usepackage{tikz} \usetikzlibrary{angles,quotes} @@ -17,7 +22,7 @@ \ifthenelse{\equal{#1}{optional}}{% \optional{ \needspace{4\baselineskip}% - \subsection{#2\textcolor{gray}{(optional)}}% + \subsection{#2 \opthint}% #3% } }{% diff --git a/content/latexHeaders/math.sty b/content/latexHeaders/math.sty index d758f71..8219782 100644 --- a/content/latexHeaders/math.sty +++ b/content/latexHeaders/math.sty @@ -41,7 +41,7 @@ \end{matrix} \Bigr) } -% Euler numbers, first kind. +% Eulerien numbers, first order. 
\newcommand{\eulerI}[2]{ \Bigl\langle \begin{matrix} @@ -50,15 +50,6 @@ \end{matrix} \Bigr\rangle } -% Euler numbers, second kind. -\newcommand{\eulerII}[2]{ - \Bigl\langle\mkern-4mu\Bigl\langle - \begin{matrix} - #1 \\ - #2 - \end{matrix} - \Bigr\rangle\mkern-4mu\Bigr\rangle -} % Stirling numbers, first kind. \newcommand{\stirlingI}[2]{ \Bigl[ diff --git a/content/latexmk.opt b/content/latexmk.opt new file mode 100644 index 0000000..88d3463 --- /dev/null +++ b/content/latexmk.opt @@ -0,0 +1,2 @@ +$jobname = 'tcr-opt'; +$pre_tex_code .= '\def\OPTIONAL{}' diff --git a/content/latexmkrc b/content/latexmkrc new file mode 100644 index 0000000..b43f9a2 --- /dev/null +++ b/content/latexmkrc @@ -0,0 +1,13 @@ +@default_files = qw(tcr); +$pdf_mode = 1; +$aux_dir = "."; +$out_dir = ".."; +{ + my $commit = `git rev-parse HEAD`; + chomp $commit; + $pre_tex_code .= + '\newcommand{\srclink}[1]' + .'{https://git.gloria-mundi.eu/tcr/plain/content/#1?id='.$commit.'}'; +} +&alt_tex_cmds; +$jobname = 'tcr'; diff --git a/content/math/berlekampMassey.cpp b/content/math/berlekampMassey.cpp index 29e084f..85a1031 100644 --- a/content/math/berlekampMassey.cpp +++ b/content/math/berlekampMassey.cpp @@ -1,6 +1,6 @@ constexpr ll mod = 1'000'000'007; vector<ll> BerlekampMassey(const vector<ll>& s) { - int n = sz(s), L = 0, m = 0; + int n = ssize(s), L = 0, m = 0; vector<ll> C(n), B(n), T; C[0] = B[0] = 1; diff --git a/content/math/bigint.cpp b/content/math/bigint.cpp index 1b3b953..a40f515 100644 --- a/content/math/bigint.cpp +++ b/content/math/bigint.cpp @@ -7,9 +7,9 @@ struct bigint { bigint() : sign(1) {} - bigint(ll v) {*this = v;} + bigint(ll v) { *this = v; } - bigint(const string &s) {read(s);} + bigint(const string &s) { read(s); } void operator=(ll v) { sign = 1; @@ -22,10 +22,11 @@ struct bigint { bigint operator+(const bigint& v) const { if (sign == v.sign) { bigint res = v; - for (ll i = 0, carry = 0; i < max(sz(a), sz(v.a)) || carry; ++i) { - if (i == sz(res.a)) + for (ll i = 0, carry = 0; + i < max(ssize(a), ssize(v.a)) || carry; ++i) { + if (i == ssize(res.a)) res.a.push_back(0); - res.a[i] += carry + (i < sz(a) ? a[i] : 0); + res.a[i] += carry + (i < ssize(a) ? a[i] : 0); carry = res.a[i] >= base; if (carry) res.a[i] -= base; @@ -39,8 +40,8 @@ struct bigint { if (sign == v.sign) { if (abs() >= v.abs()) { bigint res = *this; - for (ll i = 0, carry = 0; i < sz(v.a) || carry; ++i) { - res.a[i] -= carry + (i < sz(v.a) ? v.a[i] : 0); + for (ll i = 0, carry = 0; i < ssize(v.a) || carry; ++i) { + res.a[i] -= carry + (i < ssize(v.a) ? v.a[i] : 0); carry = res.a[i] < 0; if (carry) res.a[i] += base; } @@ -54,8 +55,8 @@ struct bigint { void operator*=(ll v) { if (v < 0) sign = -sign, v = -v; - for (ll i = 0, carry = 0; i < sz(a) || carry; ++i) { - if (i == sz(a)) a.push_back(0); + for (ll i = 0, carry = 0; i < ssize(a) || carry; ++i) { + if (i == ssize(a)) a.push_back(0); ll cur = a[i] * v + carry; carry = cur / base; a[i] = cur % base; @@ -74,12 +75,12 @@ struct bigint { bigint a = a1.abs() * norm; bigint b = b1.abs() * norm; bigint q, r; - q.a.resize(sz(a.a)); - for (ll i = sz(a.a) - 1; i >= 0; i--) { + q.a.resize(ssize(a.a)); + for (ll i = ssize(a.a) - 1; i >= 0; i--) { r *= base; r += a.a[i]; - ll s1 = sz(r.a) <= sz(b.a) ? 0 : r.a[sz(b.a)]; - ll s2 = sz(r.a) <= sz(b.a) - 1 ? 0 : r.a[sz(b.a) - 1]; + ll s1 = ssize(r.a) <= ssize(b.a) ? 0 : r.a[ssize(b.a)]; + ll s2 = ssize(r.a) <= ssize(b.a) - 1 ? 
0 : r.a[ssize(b.a) - 1]; ll d = (base * s1 + s2) / b.a.back(); r -= b * d; while (r < 0) r += b, --d; @@ -102,7 +103,7 @@ struct bigint { void operator/=(ll v) { if (v < 0) sign = -sign, v = -v; - for (ll i = sz(a) - 1, rem = 0; i >= 0; --i) { + for (ll i = ssize(a) - 1, rem = 0; i >= 0; --i) { ll cur = a[i] + rem * base; a[i] = cur / v; rem = cur % v; @@ -119,7 +120,7 @@ struct bigint { ll operator%(ll v) const { if (v < 0) v = -v; ll m = 0; - for (ll i = sz(a) - 1; i >= 0; --i) + for (ll i = ssize(a) - 1; i >= 0; --i) m = (a[i] + m * base) % v; return m * sign; } @@ -139,9 +140,9 @@ struct bigint { bool operator<(const bigint& v) const { if (sign != v.sign) return sign < v.sign; - if (sz(a) != sz(v.a)) - return sz(a) * sign < sz(v.a) * v.sign; - for (ll i = sz(a) - 1; i >= 0; i--) + if (ssize(a) != ssize(v.a)) + return ssize(a) * sign < ssize(v.a) * v.sign; + for (ll i = ssize(a) - 1; i >= 0; i--) if (a[i] != v.a[i]) return a[i] * sign < v.a[i] * sign; return false; @@ -169,7 +170,7 @@ struct bigint { } bool isZero() const { - return a.empty() || (sz(a) == 1 && a[0] == 0); + return a.empty() || (ssize(a) == 1 && a[0] == 0); } bigint operator-() const { @@ -186,7 +187,7 @@ struct bigint { ll longValue() const { ll res = 0; - for (ll i = sz(a) - 1; i >= 0; i--) + for (ll i = ssize(a) - 1; i >= 0; i--) res = res * base + a[i]; return res * sign; } @@ -195,11 +196,11 @@ struct bigint { sign = 1; a.clear(); ll pos = 0; - while (pos < sz(s) && (s[pos] == '-' || s[pos] == '+')) { + while (pos < ssize(s) && (s[pos] == '-' || s[pos] == '+')) { if (s[pos] == '-') sign = -sign; ++pos; } - for (ll i = sz(s) - 1; i >= pos; i -= base_digits) { + for (ll i = ssize(s) - 1; i >= pos; i -= base_digits) { ll x = 0; for (ll j = max(pos, i - base_digits + 1); j <= i; j++) x = x * 10 + s[j] - '0'; @@ -218,13 +219,13 @@ struct bigint { friend ostream& operator<<(ostream& stream, const bigint& v) { if (v.sign == -1) stream << '-'; stream << (v.a.empty() ? 
0 : v.a.back()); - for (ll i = sz(v.a) - 2; i >= 0; --i) + for (ll i = ssize(v.a) - 2; i >= 0; --i) stream << setw(base_digits) << setfill('0') << v.a[i]; return stream; } static vll karatsubaMultiply(const vll& a, const vll& b) { - ll n = sz(a); + ll n = ssize(a); vll res(n + n); if (n <= 32) { for (ll i = 0; i < n; i++) @@ -242,25 +243,25 @@ struct bigint { for (ll i = 0; i < k; i++) a2[i] += a1[i]; for (ll i = 0; i < k; i++) b2[i] += b1[i]; vll r = karatsubaMultiply(a2, b2); - for (ll i = 0; i < sz(a1b1); i++) r[i] -= a1b1[i]; - for (ll i = 0; i < sz(a2b2); i++) r[i] -= a2b2[i]; - for (ll i = 0; i < sz(r); i++) res[i + k] += r[i]; - for (ll i = 0; i < sz(a1b1); i++) res[i] += a1b1[i]; - for (ll i = 0; i < sz(a2b2); i++) res[i + n] += a2b2[i]; + for (ll i = 0; i < ssize(a1b1); i++) r[i] -= a1b1[i]; + for (ll i = 0; i < ssize(a2b2); i++) r[i] -= a2b2[i]; + for (ll i = 0; i < ssize(r); i++) res[i + k] += r[i]; + for (ll i = 0; i < ssize(a1b1); i++) res[i] += a1b1[i]; + for (ll i = 0; i < ssize(a2b2); i++) res[i + n] += a2b2[i]; return res; } bigint operator*(const bigint& v) const { vll ta(a.begin(), a.end()); vll va(v.a.begin(), v.a.end()); - while (sz(ta) < sz(va)) ta.push_back(0); - while (sz(va) < sz(ta)) va.push_back(0); - while (sz(ta) & (sz(ta) - 1)) + while (ssize(ta) < ssize(va)) ta.push_back(0); + while (ssize(va) < ssize(ta)) va.push_back(0); + while (ssize(ta) & (ssize(ta) - 1)) ta.push_back(0), va.push_back(0); vll ra = karatsubaMultiply(ta, va); bigint res; res.sign = sign * v.sign; - for (ll i = 0, carry = 0; i < sz(ra); i++) { + for (ll i = 0, carry = 0; i < ssize(ra); i++) { ll cur = ra[i] + carry; res.a.push_back(cur % base); carry = cur / base; diff --git a/content/math/binomial0.cpp b/content/math/binomial0.cpp index 5f2ccaa..8b436a2 100644 --- a/content/math/binomial0.cpp +++ b/content/math/binomial0.cpp @@ -8,7 +8,7 @@ void precalc() { for (int i = lim - 1; i > 0; i--) inv[i-1] = inv[i] * i % mod; } -ll calc_binom(ll n, ll k) { +ll binom(ll n, ll k) { if (n < 0 || n < k || k < 0) return 0; - return (inv[k] * inv[n-k] % mod) * fac[n] % mod; + return (fac[n] * inv[n-k] % mod) * inv[k] % mod; } diff --git a/content/math/binomial1.cpp b/content/math/binomial1.cpp index dab20b3..34f13ed 100644 --- a/content/math/binomial1.cpp +++ b/content/math/binomial1.cpp @@ -1,7 +1,7 @@ -ll calc_binom(ll n, ll k) { +ll binom(ll n, ll k) { if (k > n) return 0; ll r = 1; - for (ll d = 1; d <= k; d++) {// Reihenfolge => Teilbarkeit + for (ll d = 1; d <= k; d++) { // Reihenfolge => Teilbarkeit r *= n--, r /= d; } return r; diff --git a/content/math/binomial2.cpp b/content/math/binomial2.cpp index 4531505..bb6a035 100644 --- a/content/math/binomial2.cpp +++ b/content/math/binomial2.cpp @@ -20,7 +20,7 @@ ll binomPPow(ll n, ll k, ll p) { return res; } -ll calc_binom(ll n, ll k) { +ll binom(ll n, ll k) { if (k > n) return 0; ll res = 1; k = min(k, n - k); diff --git a/content/math/binomial3.cpp b/content/math/binomial3.cpp index 7a6ab4e..8a51dac 100644 --- a/content/math/binomial3.cpp +++ b/content/math/binomial3.cpp @@ -1,4 +1,4 @@ -ll calc_binom(ll n, ll k, ll p) { +ll binom(ll n, ll k, ll p) { assert(n < p); //wichtig: sonst falsch! if (k > n) return 0; ll x = k % 2 != 0 ? p-1 : 1; diff --git a/content/math/discreteLogarithm.cpp b/content/math/discreteLogarithm.cpp index 68866e0..844bd27 100644 --- a/content/math/discreteLogarithm.cpp +++ b/content/math/discreteLogarithm.cpp @@ -5,11 +5,11 @@ ll dlog(ll a, ll b, ll m) { //a > 0! 
vals[i] = {e, i}; } vals.emplace_back(m, 0); - sort(all(vals)); + ranges::sort(vals); ll fact = powMod(a, m - bound - 1, m); for (ll i = 0; i < m; i += bound, b = (b * fact) % m) { - auto it = lower_bound(all(vals), pair<ll, ll>{b, 0}); + auto it = ranges::lower_bound(vals, pair<ll, ll>{b, 0}); if (it->first == b) { return (i + it->second) % m; }} diff --git a/content/math/divisors.cpp b/content/math/divisors.cpp index 5afd4fb..2a17f54 100644 --- a/content/math/divisors.cpp +++ b/content/math/divisors.cpp @@ -2,7 +2,7 @@ ll countDivisors(ll n) { ll res = 1; for (ll i = 2; i * i * i <= n; i++) { ll c = 0; - while (n % i == 0) {n /= i; c++;} + while (n % i == 0) { n /= i; c++; } res *= c + 1; } if (isPrime(n)) res *= 2; diff --git a/content/math/gauss.cpp b/content/math/gauss.cpp index d431e52..719f573 100644 --- a/content/math/gauss.cpp +++ b/content/math/gauss.cpp @@ -7,7 +7,7 @@ void takeAll(int n, int line) { for (int i = 0; i < n; i++) { if (i == line) continue; double diff = mat[i][line]; - for (int j = 0; j < sz(mat[i]); j++) { + for (int j = 0; j < ssize(mat[i]); j++) { mat[i][j] -= diff * mat[line][j]; }}} @@ -22,7 +22,7 @@ int gauss(int n) { if (abs(mat[i][i]) > EPS) { normalLine(i); takeAll(n, i); - done[i] = true; + done[i] = true; }} for (int i = 0; i < n; i++) { // gauss fertig, prüfe Lösung bool allZero = true; diff --git a/content/math/inversions.cpp b/content/math/inversions.cpp index 9e47f9b..289161f 100644 --- a/content/math/inversions.cpp +++ b/content/math/inversions.cpp @@ -1,7 +1,7 @@ ll inversions(const vector<ll>& v) { Tree<pair<ll, ll>> t; //ordered statistics tree @\sourceref{datastructures/pbds.cpp}@ ll res = 0; - for (ll i = 0; i < sz(v); i++) { + for (ll i = 0; i < ssize(v); i++) { res += i - t.order_of_key({v[i], i}); t.insert({v[i], i}); } diff --git a/content/math/inversionsMerge.cpp b/content/math/inversionsMerge.cpp index 8235b11..50fe37b 100644 --- a/content/math/inversionsMerge.cpp +++ b/content/math/inversionsMerge.cpp @@ -2,26 +2,26 @@ ll merge(vector<ll>& v, vector<ll>& left, vector<ll>& right) { int a = 0, b = 0, i = 0; ll inv = 0; - while (a < sz(left) && b < sz(right)) { + while (a < ssize(left) && b < ssize(right)) { if (left[a] < right[b]) v[i++] = left[a++]; else { - inv += sz(left) - a; + inv += ssize(left) - a; v[i++] = right[b++]; } } - while (a < sz(left)) v[i++] = left[a++]; - while (b < sz(right)) v[i++] = right[b++]; + while (a < ssize(left)) v[i++] = left[a++]; + while (b < ssize(right)) v[i++] = right[b++]; return inv; } ll mergeSort(vector<ll> &v) { // Sortiert v und gibt Inversionszahl zurück. 
- int n = sz(v); + int n = ssize(v); vector<ll> left(n / 2), right((n + 1) / 2); for (int i = 0; i < n / 2; i++) left[i] = v[i]; for (int i = n / 2; i < n; i++) right[i - n / 2] = v[i]; ll result = 0; - if (sz(left) > 1) result += mergeSort(left); - if (sz(right) > 1) result += mergeSort(right); + if (ssize(left) > 1) result += mergeSort(left); + if (ssize(right) > 1) result += mergeSort(right); return result + merge(v, left, right); } diff --git a/content/math/lgsFp.cpp b/content/math/lgsFp.cpp index 4c12477..5028782 100644 --- a/content/math/lgsFp.cpp +++ b/content/math/lgsFp.cpp @@ -1,6 +1,7 @@ -vector<int> pivots; // ith pivot is in ith row -void gauss(int n, int m) { - for (int r = 0, c = 0; c < m; c++) { +vector<int> gauss(vector<vector<ll>> &mat) { + int n = ssize(mat), m = ssize(mat[0]); + vector<int> pivots; // ith pivot is in ith row + for (int r = 0, c = 0; r < n && c < m; c++) { for (int i = r; i < n; i++) { if (mat[i][c] != 0){ swap(mat[r], mat[i]); @@ -16,5 +17,7 @@ void gauss(int n, int m) { mat[i][j] = (mat[i][j] - f*mat[r][j] % mod + mod) % mod; }} pivots.push_back(c); - if (++r == n) break; -}} // no solution if pivots.back() == m-1 + r++; + } + return pivots; // no solution if pivots.back() == m-1 +} diff --git a/content/math/linearRecurrence.cpp b/content/math/linearRecurrence.cpp index a8adacd..eb04566 100644 --- a/content/math/linearRecurrence.cpp +++ b/content/math/linearRecurrence.cpp @@ -1,9 +1,9 @@ constexpr ll mod = 998244353; // oder ntt mul @\sourceref{math/transforms/ntt.cpp}@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) { - vector<ll> c(sz(a) + sz(b) - 1); - for (int i = 0; i < sz(a); i++) { - for (int j = 0; j < sz(b); j++) { + vector<ll> c(ssize(a) + ssize(b) - 1); + for (int i = 0; i < ssize(a); i++) { + for (int j = 0; j < ssize(b); j++) { c[i+j] += a[i]*b[j] % mod; }} for (ll& x : c) x %= mod; @@ -11,7 +11,7 @@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) { } ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) { - int n = sz(c); + int n = ssize(c); vector<ll> q(n + 1, 1); for (int i = 0; i < n; i++) q[i + 1] = (mod - c[i]) % mod; vector<ll> p = mul(f, q); diff --git a/content/math/linearRecurrenceOld.cpp b/content/math/linearRecurrenceOld.cpp index 2501e64..f67398d 100644 --- a/content/math/linearRecurrenceOld.cpp +++ b/content/math/linearRecurrenceOld.cpp @@ -1,7 +1,7 @@ constexpr ll mod = 1'000'000'007; vector<ll> modMul(const vector<ll>& a, const vector<ll>& b, const vector<ll>& c) { - ll n = sz(c); + ll n = ssize(c); vector<ll> res(n * 2 + 1); for (int i = 0; i <= n; i++) { //a*b for (int j = 0; j <= n; j++) { @@ -18,8 +18,8 @@ vector<ll> modMul(const vector<ll>& a, const vector<ll>& b, } ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) { - assert(sz(f) == sz(c)); - vector<ll> tmp(sz(c) + 1), a(sz(c) + 1); + assert(ssize(f) == ssize(c)); + vector<ll> tmp(ssize(c) + 1), a(ssize(c) + 1); tmp[0] = a[1] = 1; //tmp = (x^k) % c for (k++; k > 0; k /= 2) { @@ -28,6 +28,6 @@ ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) { } ll res = 0; - for (int i = 0; i < sz(c); i++) res += (tmp[i+1] * f[i]) % mod; + for (int i = 0; i < ssize(c); i++) res += (tmp[i+1] * f[i]) % mod; return res % mod; } diff --git a/content/math/linearSieve.cpp b/content/math/linearSieve.cpp index 64440dd..2ea1e94 100644 --- a/content/math/linearSieve.cpp +++ b/content/math/linearSieve.cpp @@ -3,12 +3,12 @@ ll small[N], power[N], sieved[N]; vector<ll> primes; //wird aufgerufen mit (p^k, p, k) für prime p und k > 0 -ll mu(ll pk, ll p, ll 
k) {return -(k == 1);} -ll phi(ll pk, ll p, ll k) {return pk - pk / p;} -ll div(ll pk, ll p, ll k) {return k+1;} -ll divSum(ll pk, ll p, ll k) {return (pk*p-1) / (p - 1);} -ll square(ll pk, ll p, ll k) {return k % 2 ? pk / p : pk;} -ll squareFree(ll pk, ll p, ll k) {return p;} +ll mu(ll pk, ll p, ll k) { return -(k == 1); } +ll phi(ll pk, ll p, ll k) { return pk - pk / p; } +ll div(ll pk, ll p, ll k) { return k+1; } +ll divSum(ll pk, ll p, ll k) { return (pk*p-1) / (p - 1); } +ll square(ll pk, ll p, ll k) { return k % 2 ? pk / p : pk; } +ll squareFree(ll pk, ll p, ll k) { return p; } void sieve() { // O(N) small[1] = power[1] = sieved[1] = 1; diff --git a/content/math/longestIncreasingSubsequence.cpp b/content/math/longestIncreasingSubsequence.cpp index fcb63b4..e4863d0 100644 --- a/content/math/longestIncreasingSubsequence.cpp +++ b/content/math/longestIncreasingSubsequence.cpp @@ -1,8 +1,8 @@ vector<int> lis(vector<ll>& a) { - int n = sz(a), len = 0; + int n = ssize(a), len = 0; vector<ll> dp(n, INF), dp_id(n), prev(n); for (int i = 0; i < n; i++) { - int pos = lower_bound(all(dp), a[i]) - dp.begin(); + int pos = ranges::lower_bound(dp, a[i]) - begin(dp); dp[pos] = a[i]; dp_id[pos] = i; prev[i] = pos ? dp_id[pos - 1] : -1; diff --git a/content/math/math.tex b/content/math/math.tex index c303e85..bcb4275 100644 --- a/content/math/math.tex +++ b/content/math/math.tex @@ -7,7 +7,7 @@ \end{itemize}
\sourcecode{math/longestIncreasingSubsequence.cpp}
\end{algorithm}
-\vfill\null\columnbreak
+\columnbreak
\begin{algorithm}{Zykel Erkennung}
\begin{methods}
@@ -26,21 +26,24 @@ \end{methods}
\sourcecode{math/permIndex.cpp}
\end{algorithm}
-\clearpage
-
-\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$}
-%\vspace{-1.25em}
-%\begin{multicols}{2}
-\method{mulMod}{berechnet $a \cdot b \bmod n$}{\log(b)}
-\sourcecode{math/modMulIterativ.cpp}
-% \vfill\null\columnbreak
-\method{powMod}{berechnet $a^b \bmod n$}{\log(b)}
-\sourcecode{math/modPowIterativ.cpp}
-%\end{multicols}
-%\vspace{-2.75em}
-\begin{itemize}
- \item für $a > 10^9$ \code{__int128} oder \code{modMul} benutzten!
-\end{itemize}
+\columnbreak
+
+\subsection{Potenzen in $\boldsymbol{\mathbb{Z}/n\mathbb{Z}}$}
+ \begin{methods}
+ \method{powMod}{berechnet $a^b \bmod n$}{\log(b)}
+ \end{methods}
+ \sourcecode{math/modPowIterativ.cpp}
+ \begin{itemize}
+  \item für $a > 10^9$ \code{__int128} oder \code{modMul} benutzen!
+ \end{itemize}
+
+\optional{
+\subsection{Multiplikation in $\boldsymbol{\mathbb{Z}/n\mathbb{Z}}$ \opthint}
+ \begin{methods}
+ \method{mulMod}{berechnet $a \cdot b \bmod n$}{\log(b)}
+ \end{methods}
+ \sourcecode{math/modMulIterativ.cpp}
+}
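Editorial sketch of the idea behind the two methods above (the repo files modPowIterativ.cpp and modMulIterativ.cpp are authoritative; this only illustrates the stated $\log(b)$ runtimes, assuming the template's `ll` = long long):

ll mulMod(ll a, ll b, ll n) {        // a * b mod n via doubling, O(log b) additions
  ll res = 0;
  for (a %= n, b %= n; b > 0; b >>= 1) {
    if (b & 1) res = (res + a) % n;
    a = (a + a) % n;                 // stays below 2n, no overflow for n < 2^62
  }
  return res;
}

ll powMod(ll a, ll b, ll n) {        // a ^ b mod n, O(log b) multiplications
  ll res = 1;
  for (a %= n; b > 0; b >>= 1) {
    if (b & 1) res = res * a % n;    // for n > ~3e9 use mulMod(res, a, n) or __int128 here
    a = a * a % n;
  }
  return res;
}
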
\begin{algorithm}{ggT, kgV, erweiterter euklidischer Algorithmus}
\runtime{\log(a) + \log(b)}
@@ -100,8 +103,8 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: wenn $a\equiv~b \bmod \ggT(m, n)$.
In diesem Fall sind keine Faktoren
auf der linken Seite erlaubt.
- \end{itemize}
- \sourcecode{math/chineseRemainder.cpp}
+ \end{itemize}
+ \sourcecode{math/chineseRemainder.cpp}
\end{algorithm}
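Editorial worked example for the condition above: $x \equiv 2 \pmod{6}$ and $x \equiv 8 \pmod{10}$ is solvable because $\ggT(6, 10) = 2$ teilt $8 - 2$; the solution $x \equiv 8 \pmod{30}$ is unique modulo $\mathrm{kgV}(6, 10) = 30$.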
\begin{algorithm}{Primzahltest \& Faktorisierung}
@@ -121,7 +124,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \begin{algorithm}{Matrix-Exponent}
\begin{methods}
\method{precalc}{berechnet $m^{2^b}$ vor}{\log(b)\*n^3}
- \method{calc}{berechnet $m^b\cdot$}{\log(b)\cdot n^2}
+ \method{calc}{berechnet $m^b \cdot v$}{\log(b)\cdot n^2}
\end{methods}
\textbf{Tipp:} wenn \code{v[x]=1} und \code{0} sonst, dann ist \code{res[y]} = $m^b_{y,x}$.
\sourcecode{math/matrixPower.cpp}
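Editorial sketch (not the referenced matrixPower.cpp): one way to get the stated runtimes is to precompute the powers $m^{2^i}$ once and then multiply the \emph{vector}, not the matrix, for every set bit of $b$. Assumes the template's `ll`, `using namespace std` and a global `mod` as used elsewhere in this document.

using Mat = vector<vector<ll>>;
vector<Mat> pows;                         // pows[i] = m^(2^i) mod `mod`

Mat mulMat(const Mat &a, const Mat &b) {  // O(n^3)
  int n = ssize(a);
  Mat c(n, vector<ll>(n));
  for (int i = 0; i < n; i++) for (int k = 0; k < n; k++)
    for (int j = 0; j < n; j++) c[i][j] = (c[i][j] + a[i][k] * b[k][j]) % mod;
  return c;
}

void precalc(Mat m, int bits) {           // O(log(b) * n^3), bits > log2(max b)
  pows = {m};
  for (int i = 1; i < bits; i++) pows.push_back(mulMat(pows[i-1], pows[i-1]));
}

vector<ll> calc(ll b, vector<ll> v) {     // returns m^b * v, O(log(b) * n^2)
  for (int i = 0; b > 0; i++, b >>= 1) if (b & 1) {
    vector<ll> w(ssize(v));
    for (int r = 0; r < ssize(v); r++)
      for (int c = 0; c < ssize(v); c++) w[r] = (w[r] + pows[i][r][c] * v[c]) % mod;
    v = w;
  }
  return v;
}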
@@ -236,7 +239,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \sourcecode{math/legendre.cpp}
\end{algorithm}
-\begin{algorithm}{Lineares Sieb und Multiplikative Funktionen}
+\begin{algorithm}{Lineares Sieb und multiplikative Funktionen}
Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$.
$\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen.
@@ -250,7 +253,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}!
\sourcecode{math/linearSieve.cpp}
- \textbf{\textsc{Möbius}-Funktion:}
+ \textbf{\textsc{Möbius} Funktion:}
\begin{itemize}
\item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat
\item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat
@@ -263,7 +266,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \item $p$ prim, $k \in \mathbb{N}$:
$~\varphi(p^k) = p^k - p^{k - 1}$
- \item \textbf{Euler's Theorem:}
+ \item \textbf{\textsc{Euler}'s Theorem:}
Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$.
Falls $m$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}:
$a^{m} \equiv a \pmod{m}$
@@ -321,21 +324,27 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \end{algorithm}
\begin{algorithm}{Polynome, FFT, NTT \& andere Transformationen}
+ \label{fft}
Multipliziert Polynome $A$ und $B$.
+ \ifthenelse{\isundefined{\srclink}}{}{%
+ \hfill
+ \begin{ocg}[printocg=never]{Source links}{srclinks}{1}%
+ \href{\srclink{math/transforms/}}{\faExternalLink}%
+ \end{ocg}%
+ }
\begin{itemize}
\item $\deg(A \cdot B) = \deg(A) + \deg(B)$
\item Vektoren \code{a} und \code{b} müssen mindestens Größe
$\deg(A \cdot B) + 1$ haben.
Größe muss eine Zweierpotenz sein.
\item Für ganzzahlige Koeffizienten: \code{(ll)round(real(a[i]))}
- \item \emph{xor}, \emph{or} und \emph{and} Transform funktioniert auch mit \code{double} oder modulo einer Primzahl $p$ falls $p \geq 2^{\texttt{bits}}$
+ \item \emph{or} Transform berechnet sum over subsets
+ $\rightarrow$ inverse für inclusion/exclusion
\end{itemize}
- %\sourcecode{math/fft.cpp}
- %\sourcecode{math/ntt.cpp}
\sourcecode{math/transforms/fft.cpp}
\sourcecode{math/transforms/ntt.cpp}
\sourcecode{math/transforms/bitwiseTransforms.cpp}
- Multiplikation mit 2 transforms statt 3: (nur benutzten wenn nötig!)
+ Multiplikation mit 2 Transforms statt 3: (nur benutzen wenn nötig!)
\sourcecode{math/transforms/fftMul.cpp}
\end{algorithm}
@@ -345,7 +354,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \subsection{Kombinatorik}
-\paragraph{Wilsons Theorem}
+\paragraph{\textsc{Wilson}'s Theorem}
A number $n$ is prime if and only if
$(n-1)!\equiv -1\bmod{n}$.\\
($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$)
@@ -357,14 +366,14 @@ $(n-1)!\equiv -1\bmod{n}$.\\ \end{cases}
\end{align*}
-\paragraph{\textsc{Zeckendorfs} Theorem}
+\paragraph{\textsc{Zeckendorf}'s Theorem}
Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer
verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei
aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\
\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch
hineinpasst.
-\paragraph{\textsc{Lucas}-Theorem}
+\paragraph{\textsc{Lucas}'s Theorem}
Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung),
so gilt
\vspace{-0.75\baselineskip}
@@ -380,26 +389,29 @@ so gilt \begin{methods}
\method{precalc}{berechnet $n!$ und $n!^{-1}$ vor}{\mathit{lim}}
- \method{calc\_binom}{berechnet Binomialkoeffizient}{1}
+ \method{binom}{berechnet Binomialkoeffizient}{1}
\end{methods}
\sourcecode{math/binomial0.cpp}
Falls $n >= p$ for $\mathit{mod}=p^k$ berechne \textit{fac} und \textit{inv} aber teile $p$ aus $i$ und berechne die häufigkeit von $p$ in $n!$ als $\sum\limits_{i=1}\big\lfloor\frac{n}{p^i}\big\rfloor$
\begin{methods}
- \method{calc\_binom}{berechnet Binomialkoeffizient $(n \le 61)$}{k}
+ \method{binom}{berechnet Binomialkoeffizient $(n \le 61)$}{k}
\end{methods}
\sourcecode{math/binomial1.cpp}
+ \optional{
+ \begin{methods}
+ \method{binom}{berechnet Primfaktoren vom Binomialkoeffizient}{n\*\log(n)}
+ \end{methods}
+ \opthint \\
+ \textbf{WICHTIG:} braucht alle Primzahlen $\leq n$
+ \sourcecode{math/binomial2.cpp}
+ }
+
\begin{methods}
- \method{calc\_binom}{berechnet Binomialkoeffizient modulo Primzahl $p$}{p-n}
+ \method{binom}{berechnet Binomialkoeffizient modulo Primzahl $p$}{p-n}
\end{methods}
\sourcecode{math/binomial3.cpp}
-
-% \begin{methods}
-% \method{calc\_binom}{berechnet Primfaktoren vom Binomialkoeffizient}{n}
-% \end{methods}
-% \textbf{WICHTIG:} braucht alle Primzahlen $\leq n$
-% \sourcecode{math/binomial2.cpp}
%\end{algorithm}
\paragraph{\textsc{Catalan}-Zahlen:}
@@ -416,7 +428,7 @@ $1,~1,~2,~5,~14,~42,~132,~429,~1430,~4862,~16796,~58786,~208012,~742900$ \end{itemize}
\end{itemize}
\[C_0 = 1\qquad C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} =
-\frac{1}{n + 1}\binom{2n}{n} = \frac{4n - 2}{n+1} \cdot C_{n-1}\]
+\frac{1}{n + 1}\binom{2n}{n} = \frac{4n - 2}{n+1} C_{n-1} \sim \frac{4^n}{n^{3/2} \sqrt{\pi}}\]
\begin{itemize}
\item Formel $1$ ohne Division in \runtime{n^2}, Formel $2$ und $3$ in \runtime{n}
\end{itemize}
@@ -426,72 +438,64 @@ $1,~1,~2,~5,~14,~42,~132,~429,~1430,~4862,~16796,~58786,~208012,~742900$ \item Anzahl an Klammerausdrücken mit $n+k$ Klammerpaaren, die mit $\texttt{(}^k$ beginnen.
\end{itemize}
\[C^k_0 = 1\qquad C^k_n = \sum\limits_{\mathclap{a_0+a_1+\dots+a_k=n}} C_{a_0}C_{a_1}\cdots C_{a_k} =
-\frac{k+1}{n+k+1}\binom{2n+k}{n} = \frac{(2n+k-1)\cdot(2n+k)}{n(n+k+1)} \cdot C_{n-1}\]
+\frac{k+1}{n+k+1}\binom{2n+k}{n} = \frac{(2n+k-1)(2n+k)}{n(n+k+1)} C_{n-1}\]
-\paragraph{\textsc{Euler}-Zahlen 1. Ordnung:}
+\paragraph{\textsc{Euler}-Zahlen:}
$|~1~|~1~|~1,~1~|~1,~4,~1~|~1,~11,~11,~1~|~1,~26,~66,~26,~1~|$
Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Anstiegen.
Für die $n$-te Zahl gibt es $n$ mögliche Positionen zum Einfügen.
Dabei wird entweder ein Anstieg in zwei gesplitted oder ein Anstieg um $n$ ergänzt.
\[\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \quad
-\eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1}=
-\sum_{i=0}^{k} (-1)^i\binom{n+1}{i}(k+1-i)^n\]
-\begin{itemize}
- \item Formel $1$ohne Division in \runtime{n^2}, Formel $2$ erlaubt Berechnung in \runtime{n\log(n)}
-\end{itemize}
-
-\paragraph{\textsc{Euler}-Zahlen 2. Ordnung:}
-$|~1~|~1,~0~|~1,~2,~0~|~1,~8,~6,~0~|~1,~22,~58,~24,~0~|$
-
-Die Anzahl der Permutationen von $\{1,1, \ldots, n,n\}$ mit genau $k$ Anstiegen.
-\[\eulerII{n}{0} = 1 \qquad\eulerII{n}{n} = 0 \qquad\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}\]
-\begin{itemize}
- \item Formel ohne Division in \runtime{n^2}
-\end{itemize}
+\eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1}\]
+\[
+\eulerI{n}{k} = [x^k]
+ \left(\sum_{i=0}^\infty (i+1)^n x^i\right)
+ \left(\sum_{i=0}^\infty (-1)^i \binom{n+1}{i} x^i\right)
+\]
-\paragraph{\textsc{Stirling}-Zahlen 1. Ordnung:}
+\paragraph{\textsc{Stirling}-Zahlen 1. Art:}
$|~1~|~0,~1~|~0,~1,~1~|~0,~2,~3,~1~|~0,~6,~11,~6,~1~|$
Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Zyklen.
-Es gibt zwei Möglichkeiten für die $n$-te Zahl. Entweder sie bildet einen eigene Zyklus, oder sie kann an jeder Position in jedem Zyklus einsortiert werden.
+Es gibt zwei Möglichkeiten für die $n$-te Zahl. Entweder sie bildet einen eigenen Zyklus, oder sie kann an jeder Position in jedem Zyklus einsortiert werden.
\[\stirlingI{0}{0} = 1 \qquad
\stirlingI{n}{0} = \stirlingI{0}{k} = 0 \qquad
\stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}\]
-\begin{itemize}
- \item Formel ohne Division in \runtime{n^2}
-\end{itemize}
-\[\sum_{k=0}^{n}\pm\stirlingI{n}{k}x^k=x(x-1)(x-2)\cdots(x-n+1)\]
-\begin{itemize}
- \item Berechne Polynom mit FFT und benutzte betrag der Koeffizienten \runtime{n\log(n)^2} (nur ca. gleich große Polynome zusammen multiplizieren beginnend mit $x-k$)
-\end{itemize}
+\[
+\stirlingI{n}{k}
+= [x^k]\prod_{i=0}^{n-1} (x+i)
+= n! [x^{n-k}] \frac{1}{k!} \left(\sum_{i=0}^\infty \frac{1}{i+1}x^i\right)^k
+\]
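Skizze (nicht Teil des Repos): vorzeichenlose \textsc{Stirling}-Zahlen 1. Art per Rekurrenz, ohne Division, in \runtime{n^2}.
// Skizze: s[i][k] = Stirling-Zahl 1. Art (vorzeichenlos) mod p, O(n^2).
vector<vector<ll>> stirling1(int n, ll p) {
	vector<vector<ll>> s(n + 1, vector<ll>(n + 1, 0));
	s[0][0] = 1;
	for (int i = 1; i <= n; i++) for (int k = 1; k <= i; k++)
		s[i][k] = (s[i - 1][k - 1] + (i - 1) % p * s[i - 1][k]) % p;
	return s;
}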
-\paragraph{\textsc{Stirling}-Zahlen 2. Ordnung:}
+\paragraph{\textsc{Stirling}-Zahlen 2. Art:}
$|~1~|~0,~1~|~0,~1,~1~|~0,~1,~3,~1~|~0,~1,~7,~6,~1~|$
Die Anzahl der Möglichkeiten, $n$ Elemente in $k$ nichtleere Teilmengen zu zerlegen.
Es gibt $k$ Möglichkeiten, die $n$ in eine $(n-1)$-Partition einzuordnen.
Dazu kommt der Fall, dass die $n$ in ihrer eigenen Teilmenge (alleine) steht.
\[\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad
-\stirlingII{n}{0} = 0 \qquad
-\stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} =
-\frac{1}{k!} \sum\limits_{i=0}^{k} (-1)^{k-i}\binom{k}{i}i^n\]
-\begin{itemize}
- \item Formel $1$ ohne Division in \runtime{n^2}, Formel $2$ in \runtime{n\log(n)}
-\end{itemize}
+\stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1}
+\]
+\[
+\stirlingII{n}{k}
+= [x^k]
+ \left(\sum_{i=0}^\infty \frac{i^n}{i!}x^i\right)
+ \left(\sum_{i=0}^\infty \frac{(-1)^i}{i!}x^i\right)
+= n! [x^{n-k}] \frac{1}{k!} \left(\sum_{i=0}^\infty \frac{1}{(i+1)!}x^i\right)^k
+\]
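Skizze (nicht Teil des Repos): eine einzelne $\stirlingII{n}{k}$ mod $p$ (prim, $p > k$) als $[x^k]$-Koeffizient der ersten Produktformel, also $\sum_{j=0}^{k} \frac{j^n}{j!}\cdot\frac{(-1)^{k-j}}{(k-j)!}$, in \runtime{k\log(n)}; \texttt{powMod} wie in der Catalan-Skizze oben.
// Skizze: S(n, k) mod p, p prim und p > k.
ll stirling2(ll n, ll k, ll p) {
	vector<ll> fac(k + 1, 1);
	for (ll i = 1; i <= k; i++) fac[i] = fac[i - 1] * i % p;
	ll res = 0;
	for (ll j = 0; j <= k; j++) {
		ll t = powMod(j, n, p) * powMod(fac[j] * fac[k - j] % p, p - 2, p) % p;
		res = ((k - j) & 1 ? res - t + p : res + t) % p;
	}
	return res;
}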
\paragraph{\textsc{Bell}-Zahlen:}
$1,~1,~2,~5,~15,~52,~203,~877,~4140,~21147,~115975,~678570,~4213597$
Anzahl der Partitionen von $\{1, \ldots, n\}$.
-Wie \textsc{Stirling}-Zahlen 2. Ordnung ohne Limit durch $k$.
-\vspace{-0.2cm}%
-\[B_0 = 1 \qquad
+Wie \textsc{Stirling}-Zahlen 2. Art ohne Limit durch $k$.
+\[B_0 = 1 \qquad
B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k}
-= \sum\limits_{k = 0}^{n}\stirlingII{n}{k}\qquad\qquad B_{p^m+n}\equiv m\cdot B_n + B_{n+1} \bmod{p}\]
-\begin{itemize}
- \item Alternative: $B(n) = \mathrm{EGF}\big(e^{e^n-1}\big)$ (\code{poly_exp} auf Seite \pageref{code:math/transforms/seriesOperations.cpp})
-\end{itemize}
+= \sum\limits_{k = 0}^{n}\stirlingII{n}{k}
+= n! [x^n] e^{e^x-1}
+\qquad
+B_{p^m+n}\equiv m\cdot B_n + B_{n+1} \bmod{p}\]
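Skizze (nicht Teil des Repos): $B_0,\ldots,B_n$ ohne Division über das \textsc{Bell}-Dreieck (jede Zeile beginnt mit dem letzten Eintrag der vorherigen, danach jeweils Summe aus linkem und darüberliegendem Eintrag) in \runtime{n^2}.
// Skizze: Bell-Zahlen B_0..B_n mod m über das Bell-Dreieck, O(n^2).
vector<ll> bell(int n, ll m) {
	vector<ll> b(n + 1), row = {1 % m};
	b[0] = 1 % m;
	for (int i = 1; i <= n; i++) {
		vector<ll> nxt(i + 1);
		nxt[0] = row.back(); // erster Eintrag der Zeile i = B_i
		for (int j = 1; j <= i; j++) nxt[j] = (nxt[j - 1] + row[j - 1]) % m;
		b[i] = nxt[0];
		row = nxt;
	}
	return b;
}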
\paragraph{$\boldsymbol{k}$ Partitions:}
$|~1~|~0,~1~|~0,~1,~1~|~0,~1,~1,~1~|~0,~1,~2,~1,~1~|~0,~1,~2,~2,~1,~1~|~0,~1,~3,~3,~2,~1,~1~|$
@@ -499,19 +503,22 @@ $|~1~|~0,~1~|~0,~1,~1~|~0,~1,~1,~1~|~0,~1,~2,~1,~1~|~0,~1,~2,~2,~1,~1~|~0,~1,~3, Die Anzahl der Partitionen von $n$ in genau $k$ positive Summanden.
Die Anzahl der Partitionen von $n$ mit größtem Element $k$.
\begin{align*}
- p_0(0)=1 \qquad p_k(n)&=0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0\\[-2pt]
- p_k(n)&= p_k(n-k) + p_{k-1}(n-1)\\[-0.55cm]
+ p_0(0)=1 \qquad p_k(n)&=0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0\\
+ p_k(n)&= p_k(n-k) + p_{k-1}(n-1)\\
\end{align*}
-\begin{itemize}
- \item Anzahl der Partitionen von $n$ in bis zu $k$ Summanden: $\sum\limits_{i=0}^{k}p_i(n)=p_k(n+k)$
-\end{itemize}
\paragraph{Partitions:}
$1,~1,~2,~3,~5,~7,~11,~15,~22,~30,~42,~56,~77,~101,~135,~176,~231,~297,~385,~490,~627$
-\[p(n)=\sum_{k=1}^{n} p_k(n)=p_n(2n)=\sum\limits_{k=1}^\infty(-1)^{k+1}\bigg[p\bigg(n - \frac{k(3k-1)}{2}\bigg) + p\bigg(n - \frac{k(3k+1)}{2}\bigg)\bigg]\]
+
+\begin{align*}
+ p(n)=\sum_{k=1}^{n} p_k(n)&=p_n(2n)=\sum\limits_{k=1}^\infty(-1)^{k+1}\bigg[p\bigg(n - \frac{k(3k-1)}{2}\bigg) + p\bigg(n - \frac{k(3k+1)}{2}\bigg)\bigg] \\
+ p(n)&=[x^n] \left(\sum_{k=-\infty}^\infty (-1)^k x^{k(3k-1)/2}\right)^{-1}
+ \sim \frac{1}{4 \sqrt{3} n} \exp\left(\pi \sqrt{\frac{2n}{3}}\right)
+\end{align*}
\begin{itemize}
- \item Rekursion abbrechen wenn argument negativ wird $\Longrightarrow$ Laufzeit $\runtime{\sqrt{n}}$
- \item Alternative: $p(n) = \mathrm{OGF(A)}^{-1}$ mit $A\mkern-1mu\big[\frac{k(3k\pm1)}{2}\big]\coloneqq(-1)^k$ (\code{poly_inv} auf Seite \pageref{code:math/transforms/seriesOperations.cpp})
+	\item In Formel $3$ kann die Summe abgebrochen werden, sobald $\frac{k(3k-1)}{2} > n$
+	$\rightarrow$ alle $p(0),\ldots,p(n)$ zusammen in \runtime{n \sqrt{n}} (Skizze unten).
+ \item Die Anzahl der Partitionen von $n$ in bis zu $k$ positive Summanden ist $\sum\limits_{i=0}^{k}p_i(n)=p_k(n+k)$.
\end{itemize}
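Skizze (nicht Teil des Repos): $p(0),\ldots,p(n)$ mod $m$ über die Pentagonalzahl-Rekurrenz (Formel $3$).
// Skizze: Partitionszahlen p(0..n) mod m, O(n sqrt n).
vector<ll> partitions(int n, ll m) {
	vector<ll> p(n + 1); p[0] = 1 % m;
	for (int i = 1; i <= n; i++)
		for (ll k = 1, g; (g = k * (3 * k - 1) / 2) <= i; k++) {
			ll s = (k & 1 ? 1 : -1); // (-1)^(k+1)
			p[i] = ((p[i] + s * p[i - g]) % m + m) % m;
			if (g + k <= i) p[i] = ((p[i] + s * p[i - g - k]) % m + m) % m; // k(3k+1)/2 = g+k
		}
	return p;
}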
\subsection{The Twelvefold Way \textnormal{(verteile $n$ Bälle auf $k$ Boxen)}}
@@ -563,10 +570,10 @@ Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \text \input{math/tables/series}
\subsection{Wichtige Zahlen}
-\input{math/tables/composite}
+\input{math/tables/prime-composite}
-\subsection{Recover $\boldsymbol{x}$ and $\boldsymbol{y}$ from $\boldsymbol{y}$ from $\boldsymbol{x\*y^{-1}}$ }
-\method{recover}{findet $x$ und $y$ für $x=x\*y^{-1}\bmod m$}{\log(m)}
+\subsection{Recover $\boldsymbol{x}$ and $\boldsymbol{y}$ from $\boldsymbol{x\*y^{-1}}$ }
+\method{recover}{findet $x$ und $y$ für $c=x\*y^{-1}\bmod m$}{\log(m)}
\textbf{WICHTIG:} $x$ und $y$ müssen kleiner als $\sqrt{\nicefrac{m}{2}}$ sein!
\sourcecode{math/recover.cpp}
diff --git a/content/math/matrixPower.cpp b/content/math/matrixPower.cpp index d981e6e..d80dac6 100644 --- a/content/math/matrixPower.cpp +++ b/content/math/matrixPower.cpp @@ -1,14 +1,14 @@ vector<mat> pows; void precalc(mat m) { - pows = {mat(sz(m.m), 1), m}; - for (int i = 1; i < 60; i++) pows.push_back(pows[i] * pows[i]); + pows = {m}; + for (int i = 0; i < 60; i++) pows.push_back(pows[i] * pows[i]); } auto calc(ll b, vector<ll> v) { - for (ll i = 1; b > 0; i++) { + for (ll i = 0; b > 0; i++) { if (b & 1) v = pows[i] * v; - b /= 2; + b >>= 1; } return v; } diff --git a/content/math/modExp.cpp b/content/math/modExp.cpp deleted file mode 100644 index 2329a94..0000000 --- a/content/math/modExp.cpp +++ /dev/null @@ -1,6 +0,0 @@ -ll powMod(ll a, ll b, ll n) { - if(b == 0) return 1; - if(b == 1) return a % n; - if(b & 1) return (powMod(a, b - 1, n) * a) % n; - else return powMod((a * a) % n, b / 2, n); -} diff --git a/content/math/permIndex.cpp b/content/math/permIndex.cpp index 4cffc12..563b33a 100644 --- a/content/math/permIndex.cpp +++ b/content/math/permIndex.cpp @@ -1,12 +1,12 @@ ll permIndex(vector<ll> v) { Tree<ll> t; - reverse(all(v)); + ranges::reverse(v); for (ll& x : v) { t.insert(x); x = t.order_of_key(x); } ll res = 0; - for (int i = sz(v); i > 0; i--) { + for (int i = ssize(v); i > 0; i--) { res = res * i + v[i - 1]; } return res; diff --git a/content/math/piLegendre.cpp b/content/math/piLegendre.cpp index 21b974b..6401a4f 100644 --- a/content/math/piLegendre.cpp +++ b/content/math/piLegendre.cpp @@ -1,23 +1,23 @@ -constexpr ll cache = 500; // requires O(cache^3)
-vector<vector<ll>> memo(cache * cache, vector<ll>(cache));
-
-ll pi(ll n);
-
-ll phi(ll n, ll k) {
- if (n <= 1 || k < 0) return 0;
- if (n <= primes[k]) return n - 1;
- if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k;
- bool ok = n < cache * cache;
- if (ok && memo[n][k] > 0) return memo[n][k];
- ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1);
- if (ok) memo[n][k] = res;
- return res;
-}
-
-ll pi(ll n) {
- if (n < N) { // implement this as O(1) lookup for speedup!
- return distance(primes.begin(), upper_bound(all(primes), n));
- } else {
- ll k = pi(sqrtl(n) + 1);
- return n - phi(n, k) + k;
-}}
+constexpr ll cache = 500; // requires O(cache^3) +vector<vector<ll>> memo(cache * cache, vector<ll>(cache)); + +ll pi(ll n); + +ll phi(ll n, ll k) { + if (n <= 1 || k < 0) return 0; + if (n <= primes[k]) return n - 1; + if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k; + bool ok = n < cache * cache; + if (ok && memo[n][k] > 0) return memo[n][k]; + ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1); + if (ok) memo[n][k] = res; + return res; +} + +ll pi(ll n) { + if (n < N) { // implement this as O(1) lookup for speedup! + return ranges::upper_bound(primes, n) - begin(primes); + } else { + ll k = pi(sqrtl(n) + 1); + return n - phi(n, k) + k; +}} diff --git a/content/math/polynomial.cpp b/content/math/polynomial.cpp index 84f3aaa..12a4fd7 100644 --- a/content/math/polynomial.cpp +++ b/content/math/polynomial.cpp @@ -4,15 +4,15 @@ struct poly { poly(int deg = 0) : data(1 + deg) {} poly(initializer_list<ll> _data) : data(_data) {} - int size() const {return sz(data);} + int size() const { return ssize(data); } void trim() { for (ll& x : data) x = (x % mod + mod) % mod; while (size() > 1 && data.back() == 0) data.pop_back(); } - ll& operator[](int x) {return data[x];} - const ll& operator[](int x) const {return data[x];} + ll& operator[](int x) { return data[x]; } + const ll& operator[](int x) const { return data[x]; } ll operator()(int x) const { ll res = 0; diff --git a/content/math/primeSieve.cpp b/content/math/primeSieve.cpp index 1b0f514..2b2bf26 100644 --- a/content/math/primeSieve.cpp +++ b/content/math/primeSieve.cpp @@ -8,7 +8,7 @@ bool isPrime(ll x) { } void primeSieve() { - for (ll i = 3; i < N; i += 2) {// i * i < N reicht für isPrime + for (ll i = 3; i < N; i += 2) { // i * i < N reicht für isPrime if (!isNotPrime[i / 2]) { primes.push_back(i); // optional for (ll j = i * i; j < N; j+= 2 * i) { diff --git a/content/math/recover.cpp b/content/math/recover.cpp index 1a593f0..a4c22aa 100644 --- a/content/math/recover.cpp +++ b/content/math/recover.cpp @@ -1,4 +1,4 @@ -ll sq(ll x) {return x*x;} +ll sq(ll x) { return x*x; } array<ll, 2> recover(ll c, ll m) { array<ll, 2> u = {m, 0}, v = {c, 1}; diff --git a/content/math/rho.cpp b/content/math/rho.cpp index ad640cd..c7f7a70 100644 --- a/content/math/rho.cpp +++ b/content/math/rho.cpp @@ -2,7 +2,7 @@ using lll = __int128; ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim. if (n % 2 == 0) return 2; ll x = 0, y = 0, prd = 2, i = n/2 + 7; - auto f = [&](lll c){return (c * c + i) % n;}; + auto f = [&](lll c) { return (c * c + i) % n; }; for (ll t = 30; t % 40 || gcd(prd, n) == 1; t++) { if (x == y) x = ++i, y = f(x); if (ll q = (lll)prd * abs(x-y) % n; q) prd = q; @@ -13,7 +13,7 @@ ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim. void factor(ll n, map<ll, int>& facts) { if (n == 1) return; - if (isPrime(n)) {facts[n]++; return;} + if (isPrime(n)) { facts[n]++; return; } ll f = rho(n); factor(n / f, facts); factor(f, facts); } diff --git a/content/math/shortModInv.cpp b/content/math/shortModInv.cpp index cf91ca0..7d3002c 100644 --- a/content/math/shortModInv.cpp +++ b/content/math/shortModInv.cpp @@ -1,3 +1,3 @@ ll multInv(ll x, ll m) { // x^{-1} mod m - return 1 < x ? m - multInv(m % x, x) * m / x : 1; + return 1 < (x %= m) ? 
m - multInv(m, x) * m / x : 1; } diff --git a/content/math/simpson.cpp b/content/math/simpson.cpp index 7f237a4..da9c002 100644 --- a/content/math/simpson.cpp +++ b/content/math/simpson.cpp @@ -1,4 +1,4 @@ -//double f(double x) {return x;} +//double f(double x) { return x; } double simps(double a, double b) { return (f(a) + 4.0 * f((a + b) / 2.0) + f(b)) * (b - a) / 6.0; diff --git a/content/math/sqrtModCipolla.cpp b/content/math/sqrtModCipolla.cpp index 1fac0c5..c062646 100644 --- a/content/math/sqrtModCipolla.cpp +++ b/content/math/sqrtModCipolla.cpp @@ -1,4 +1,4 @@ -ll sqrtMod(ll a, ll p) {// teste mit legendre ob lösung existiert +ll sqrtMod(ll a, ll p) {// teste mit Legendre ob Lösung existiert if (a < 2) return a; ll t = 0; while (legendre((t*t-4*a) % p, p) >= 0) t = rng() % p; diff --git a/content/math/tables/composite.tex b/content/math/tables/composite.tex deleted file mode 100644 index 7a6ab09..0000000 --- a/content/math/tables/composite.tex +++ /dev/null @@ -1,26 +0,0 @@ -\begin{expandtable} -\begin{tabularx}{\linewidth}{|r||r|R||r||r|} - \hline - $10^x$ & Highly Composite & \# Divs & \# prime Divs & \# Primes \\ - \hline - 1 & 6 & 4 & 2 & 4 \\ - 2 & 60 & 12 & 3 & 25 \\ - 3 & 840 & 32 & 4 & 168 \\ - 4 & 7\,560 & 64 & 5 & 1\,229 \\ - 5 & 83\,160 & 128 & 6 & 9\,592 \\ - 6 & 720\,720 & 240 & 7 & 78\,498 \\ - 7 & 8\,648\,640 & 448 & 8 & 664\,579 \\ - 8 & 73\,513\,440 & 768 & 8 & 5\,761\,455 \\ - 9 & 735\,134\,400 & 1\,344 & 9 & 50\,847\,534 \\ - 10 & 6\,983\,776\,800 & 2\,304 & 10 & 455\,052\,511 \\ - 11 & 97\,772\,875\,200 & 4\,032 & 10 & 4\,118\,054\,813 \\ - 12 & 963\,761\,198\,400 & 6\,720 & 11 & 37\,607\,912\,018 \\ - 13 & 9\,316\,358\,251\,200 & 10\,752 & 12 & 346\,065\,536\,839 \\ - 14 & 97\,821\,761\,637\,600 & 17\,280 & 12 & 3\,204\,941\,750\,802 \\ - 15 & 866\,421\,317\,361\,600 & 26\,880 & 13 & 29\,844\,570\,422\,669 \\ - 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & 13 & 279\,238\,341\,033\,925 \\ - 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & 14 & 2\,623\,557\,157\,654\,233 \\ - 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & 16 & 24\,739\,954\,287\,740\,860 \\ - \hline -\end{tabularx} -\end{expandtable} diff --git a/content/math/tables/nim.tex b/content/math/tables/nim.tex index 66e289e..3c36bbb 100644 --- a/content/math/tables/nim.tex +++ b/content/math/tables/nim.tex @@ -1,3 +1,4 @@ +In jedem Zug, wähle $m \in M$ und nimm $m$ Objekte. \begin{expandtable} \begin{tabularx}{\linewidth}{|p{0.37\linewidth}|X|} \hline @@ -84,14 +85,12 @@ $\mathit{SG}_n = n - 1$, falls $n \equiv 0 \bmod 4$\\ \hline - \textsc{Kayles}' Nim:\newline - Zwei mögliche Züge:\newline - 1) Nehme beliebige Zahl.\newline - 2) Teile Stapel in zwei Stapel (mit Entnahme).& + Kayles:\newline + Nimm 1 oder 2, dann teile den Stapel optional in zwei Stapel.& Berechne $\mathit{SG}_n$ für kleine $n$ rekursiv.\newline $n \in [72,83]: \quad 4, 1, 2, 8, 1, 4, 7, 2, 1, 8, 2, 7$\newline Periode ab $n = 72$ der Länge $12$.\\ \hline \end{tabularx} \end{expandtable} -
\ No newline at end of file + diff --git a/content/math/tables/prime-composite.tex b/content/math/tables/prime-composite.tex new file mode 100644 index 0000000..b8adadf --- /dev/null +++ b/content/math/tables/prime-composite.tex @@ -0,0 +1,31 @@ +\begin{expandtable} +\begin{tabularx}{\linewidth}{|r|rIr|rIr|r|R|} + \hline + \multirow{2}{*}{$10^x$} + & \multirow{2}{*}{Highly Composite} + & \multirow{2}{*}{\# Divs} + & \multicolumn{2}{c|}{Prime} + & \multirow{2}{*}{\# Primes} & \# Prime \\ + & & & $<$ & $>$ & & Factors \\ + \hline + 1 & 6 & 4 & $-3$ & $+1$ & 4 & 2 \\ + 2 & 60 & 12 & $-3$ & $+1$ & 25 & 3 \\ + 3 & 840 & 32 & $-3$ & $+9$ & 168 & 4 \\ + 4 & 7\,560 & 64 & $-27$ & $+7$ & 1\,229 & 5 \\ + 5 & 83\,160 & 128 & $-9$ & $+3$ & 9\,592 & 6 \\ + 6 & 720\,720 & 240 & $-17$ & $+3$ & 78\,498 & 7 \\ + 7 & 8\,648\,640 & 448 & $-9$ & $+19$ & 664\,579 & 8 \\ + 8 & 73\,513\,440 & 768 & $-11$ & $+7$ & 5\,761\,455 & 8 \\ + 9 & 735\,134\,400 & 1\,344 & $-63$ & $+7$ & 50\,847\,534 & 9 \\ + 10 & 6\,983\,776\,800 & 2\,304 & $-33$ & $+19$ & 455\,052\,511 & 10 \\ + 11 & 97\,772\,875\,200 & 4\,032 & $-23$ & $+3$ & 4\,118\,054\,813 & 10 \\ + 12 & 963\,761\,198\,400 & 6\,720 & $-11$ & $+39$ & 37\,607\,912\,018 & 11 \\ + 13 & 9\,316\,358\,251\,200 & 10\,752 & $-29$ & $+37$ & 346\,065\,536\,839 & 12 \\ + 14 & 97\,821\,761\,637\,600 & 17\,280 & $-27$ & $+31$ & 3\,204\,941\,750\,802 & 12 \\ + 15 & 866\,421\,317\,361\,600 & 26\,880 & $-11$ & $+37$ & 29\,844\,570\,422\,669 & 13 \\ + 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & $-63$ & $+61$ & 279\,238\,341\,033\,925 & 13 \\ + 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & $-3$ & $+3$ & 2\,623\,557\,157\,654\,233 & 14 \\ + 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & $-11$ & $+3$ & 24\,739\,954\,287\,740\,860 & 15 \\ + \hline +\end{tabularx} +\end{expandtable} diff --git a/content/math/transforms/andTransform.cpp b/content/math/transforms/andTransform.cpp index 93a323a..87bae0b 100644 --- a/content/math/transforms/andTransform.cpp +++ b/content/math/transforms/andTransform.cpp @@ -1,8 +1,8 @@ void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; + ll &u = a[j], &v = a[j + s]; u = inv ? u - v : u + v; }}}} diff --git a/content/math/transforms/bitwiseTransforms.cpp b/content/math/transforms/bitwiseTransforms.cpp index fbe3792..c0f6e50 100644 --- a/content/math/transforms/bitwiseTransforms.cpp +++ b/content/math/transforms/bitwiseTransforms.cpp @@ -1,9 +1,9 @@ void bitwiseConv(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; + ll &u = a[j], &v = a[j + s]; u = inv ? u - v : u + v; // AND //v = inv ? 
v - u : v + u; // OR //tie(u, v) = pair(u + v, u - v); // XOR diff --git a/content/math/transforms/fft.cpp b/content/math/transforms/fft.cpp index 2bd95b2..1f80e36 100644 --- a/content/math/transforms/fft.cpp +++ b/content/math/transforms/fft.cpp @@ -1,7 +1,7 @@ using cplx = complex<double>; void fft(vector<cplx>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int i = 0, j = 1; j < n - 1; ++j) { for (int k = n >> 1; k > (i ^= k); k >>= 1); if (j < i) swap(a[i], a[j]); diff --git a/content/math/transforms/fftMul.cpp b/content/math/transforms/fftMul.cpp index 660ed79..da6a538 100644 --- a/content/math/transforms/fftMul.cpp +++ b/content/math/transforms/fftMul.cpp @@ -1,8 +1,8 @@ vector<cplx> mul(vector<ll>& a, vector<ll>& b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); - vector<cplx> c(all(a)), d(n); + int n = 1 << (__lg(ssize(a) + ssize(b) - 1) + 1); + vector<cplx> c(begin(a), end(a)), d(n); c.resize(n); - for (int i = 0; i < sz(b); i++) c[i] = {real(c[i]), b[i]}; + for (int i = 0; i < ssize(b); i++) c[i] = {real(c[i]), b[i]}; fft(c); for (int i = 0; i < n; i++) { int j = (n - i) & (n - 1); diff --git a/content/math/transforms/multiplyBitwise.cpp b/content/math/transforms/multiplyBitwise.cpp index f7cf169..5275b8c 100644 --- a/content/math/transforms/multiplyBitwise.cpp +++ b/content/math/transforms/multiplyBitwise.cpp @@ -1,5 +1,5 @@ vector<ll> mul(vector<ll> a, vector<ll> b) { - int n = 1 << (__lg(2 * max(sz(a), sz(b)) - 1)); + int n = 1 << (__lg(2 * max(ssize(a), ssize(b)) - 1)); a.resize(n), b.resize(n); bitwiseConv(a), bitwiseConv(b); for (int i=0; i<n; i++) a[i] *= b[i]; // MOD? diff --git a/content/math/transforms/multiplyFFT.cpp b/content/math/transforms/multiplyFFT.cpp index 0022d1f..963be94 100644 --- a/content/math/transforms/multiplyFFT.cpp +++ b/content/math/transforms/multiplyFFT.cpp @@ -1,6 +1,6 @@ vector<ll> mul(vector<ll>& a, vector<ll>& b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); - vector<cplx> a2(all(a)), b2(all(b)); + int n = 1 << (__lg(ssize(a) + ssize(b) - 1) + 1); + vector<cplx> a2(begin(a), end(a)), b2(begin(b), end(b)); a2.resize(n), b2.resize(n); fft(a2), fft(b2); for (int i=0; i<n; i++) a2[i] *= b2[i]; diff --git a/content/math/transforms/multiplyNTT.cpp b/content/math/transforms/multiplyNTT.cpp index 806d124..d234ce3 100644 --- a/content/math/transforms/multiplyNTT.cpp +++ b/content/math/transforms/multiplyNTT.cpp @@ -1,5 +1,5 @@ vector<ll> mul(vector<ll> a, vector<ll> b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); + int n = 1 << bit_width(size(a) + size(b) - 1); a.resize(n), b.resize(n); ntt(a), ntt(b); for (int i=0; i<n; i++) a[i] = a[i] * b[i] % mod; diff --git a/content/math/transforms/ntt.cpp b/content/math/transforms/ntt.cpp index ca605d3..fc7874e 100644 --- a/content/math/transforms/ntt.cpp +++ b/content/math/transforms/ntt.cpp @@ -1,7 +1,7 @@ constexpr ll mod = 998244353, root = 3; void ntt(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); auto b = a; ll r = inv ? powMod(root, mod - 2, mod) : root; diff --git a/content/math/transforms/orTransform.cpp b/content/math/transforms/orTransform.cpp index 60b4426..1833ac5 100644 --- a/content/math/transforms/orTransform.cpp +++ b/content/math/transforms/orTransform.cpp @@ -1,8 +1,8 @@ void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; + ll &u = a[j], &v = a[j + s]; v = inv ? 
v - u : v + u; }}}} diff --git a/content/math/transforms/seriesOperations.cpp b/content/math/transforms/seriesOperations.cpp index d3e3072..cfac7b9 100644 --- a/content/math/transforms/seriesOperations.cpp +++ b/content/math/transforms/seriesOperations.cpp @@ -17,7 +17,7 @@ vector<ll> poly_inv(const vector<ll>& a, int n) { // a[0] == 1 } vector<ll> poly_deriv(vector<ll> a) { - for (int i = 1; i < sz(a); i++) + for (int i = 1; i < ssize(a); i++) a[i-1] = a[i] * i % mod; a.pop_back(); return a; @@ -25,11 +25,11 @@ vector<ll> poly_deriv(vector<ll> a) { vector<ll> poly_integr(vector<ll> a) { static vector<ll> inv = {0, 1}; - for (static int i = 2; i <= sz(a); i++) + for (static int i = 2; i <= ssize(a); i++) inv.push_back(mod - mod / i * inv[mod % i] % mod); a.push_back(0); - for (int i = sz(a) - 1; i > 0; i--) + for (int i = ssize(a) - 1; i > 0; i--) a[i] = a[i-1] * inv[i] % mod; a[0] = 0; return a; @@ -46,7 +46,7 @@ vector<ll> poly_exp(vector<ll> a, int n) { // a[0] == 0 for (int len = 1; len < n; len *= 2) { vector<ll> p = poly_log(q, 2*len); for (int i = 0; i < 2*len; i++) - p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod; + p[i] = (mod - p[i] + (i < ssize(a) ? a[i] : 0)) % mod; vector<ll> q2 = q; q2.resize(2*len); ntt(p), ntt(q2); diff --git a/content/math/transforms/xorTransform.cpp b/content/math/transforms/xorTransform.cpp index f9d1d82..aa3db8d 100644 --- a/content/math/transforms/xorTransform.cpp +++ b/content/math/transforms/xorTransform.cpp @@ -1,9 +1,9 @@ void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); + int n = ssize(a); for (int s = 1; s < n; s *= 2) { for (int i = 0; i < n; i += 2 * s) { for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; + ll &u = a[j], &v = a[j + s]; tie(u, v) = pair(u + v, u - v); }}} if (inv) for (ll& x : a) x /= n; diff --git a/content/other/fastIO.cpp b/content/other/fastIO.cpp index 9badcc7..09473f4 100644 --- a/content/other/fastIO.cpp +++ b/content/other/fastIO.cpp @@ -16,7 +16,7 @@ void printPositive(int n) { } void fastprint(int n) { - if(n == 0) {putchar('0'); return;} + if(n == 0) { putchar('0'); return; } if (n < 0) { putchar('-'); printPositive(-n); diff --git a/content/other/fastSubsetSum.cpp b/content/other/fastSubsetSum.cpp index 84396f6..38a84b6 100644 --- a/content/other/fastSubsetSum.cpp +++ b/content/other/fastSubsetSum.cpp @@ -1,11 +1,11 @@ int fastSubsetSum(vector<int> w, int t){ int a = 0, b = 0; - while(b < sz(w) && a + w[b] <= t) a += w[b++]; - if(b == sz(w)) return a; - int m = *max_element(all(w)); + while(b < ssize(w) && a + w[b] <= t) a += w[b++]; + if(b == ssize(w)) return a; + int m = *ranges::max_element(w); vector<int> dp(2*m, -1), old; dp[m+a-t] = b; - for(int i = b; i < sz(w); i++){ + for(int i = b; i < ssize(w); i++){ old = dp; for(int j = 0; j < m; j++){ dp[j+w[i]] = max(dp[j+w[i]], old[j]); @@ -18,4 +18,4 @@ int fastSubsetSum(vector<int> w, int t){ } for(a = t; dp[m+a-t] < 0; a--); return a; -}
\ No newline at end of file +} diff --git a/content/other/josephus2.cpp b/content/other/josephus2.cpp index 33544ea..1c4295d 100644 --- a/content/other/josephus2.cpp +++ b/content/other/josephus2.cpp @@ -1,5 +1,5 @@ -int rotateLeft(int n) { // Der letzte Überlebende, 1-basiert. +ll rotateLeft(ll n) { // Der letzte Überlebende, 0-basiert. int bits = __lg(n); - n ^= 1 << bits; - return 2 * n + 1; + n ^= 1ll << bits; + return n << 1; } diff --git a/content/other/other.tex b/content/other/other.tex index d3e2a47..d8726d4 100644 --- a/content/other/other.tex +++ b/content/other/other.tex @@ -31,7 +31,7 @@ \begin{tabularx}{\linewidth}{|Ll|}
\hline
Bit an Position j lesen & \code{(x \& (1 << j)) != 0} \\
- Bit an Position j setzten & \code{x |= (1 << j)} \\
+ Bit an Position j setzen & \code{x |= (1 << j)} \\
Bit an Position j löschen & \code{x \&= ~(1 << j)} \\
Bit an Position j flippen & \code{x ^= (1 << j)} \\
	Anzahl an führenden Nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\
@@ -67,9 +67,7 @@ \paragraph{Quadrangle inequality} Die Bedingung $\forall a\leq b\leq c\leq d:
C[a][d] + C[b][c] \geq C[a][c] + C[b][d]$ ist hinreichend für beide Optimierungen.
- \paragraph{Sum over Subsets DP} $\text{res}[\text{mask}]=\sum_{i\subseteq\text{mask}}\text{in}[i]$.
- Für Summe über Supersets \code{res} einmal vorher und einmal nachher reversen.
- \sourcecode{other/sos.cpp}
+ \paragraph{Sum over Subsets DP} Siehe \emph{or} Transform, Seite \pageref{fft}.
\end{algorithm}
\begin{algorithm}{Fast Subset Sum}
@@ -82,12 +80,12 @@ \sourcecode{other/pbs.cpp}
\end{algorithm}
+\columnbreak
\begin{algorithm}{Josephus-Problem}
$n$ Personen im Kreis, jeder $k$-te wird erschossen.
\begin{description}
\item[Spezialfall $\boldsymbol{k=2}$:] Betrachte $n$ binär.
- Für $n = 1b_1b_2b_3..b_n$ ist $b_1b_2b_3..b_n1$ die Position des letzten Überlebenden.
- (Rotiere $n$ um eine Stelle nach links)
+			Für $n = 1b_1b_2\ldots b_m$ ist $b_1b_2\ldots b_m0$ die Position des letzten Überlebenden (0-basiert).
\end{description}
\sourcecode{other/josephus2.cpp}
@@ -98,7 +96,6 @@ Also: $F(n,k) = (F(n-1,k)+k)\%n$. Basisfall: $F(1,k) = 0$.
\end{description}
\sourcecode{other/josephusK.cpp}
- \textbf{Beachte bei der Ausgabe, dass die Personen im ersten Fall von $\boldsymbol{1, \ldots, n}$ nummeriert sind, im zweiten Fall von $\boldsymbol{0, \ldots, n-1}$!}
\end{algorithm}
\begin{algorithm}[optional]{Zeileneingabe}
@@ -126,8 +123,8 @@ c'(s',v)&=\sum_{u\in{}V}d(u,v)&c'(v,t')&=\sum_{u\in{}V}d(v,u)\\[-0.5ex]
c'(u,v)&=c(u,v)-d(u,v)&c'(t,s)&=x
\end{align*}
- Löse Fluss auf $G'$ mit \textsc{Dinic's Algorithmus}, wenn alle Kanten von $s'$ saturiert sind ist der Fluss in $G$ gültig. $x$ beschränkt den Fluss in $G$ (Binary-Search für minflow, $\infty$ sonst).
- \item \textbf{\textsc{Johnsons} Reweighting Algorithmus:}
+	Löse den Fluss auf $G'$ mit \textsc{Dinitz}' Algorithmus. Wenn alle Kanten von $s'$ saturiert sind, ist der Fluss in $G$ gültig. $x$ beschränkt den Fluss in $G$ (Binary-Search für minflow, $\infty$ sonst).
+ \item \textbf{\textsc{Johnson}s Reweighting Algorithm:}
	Initialisiere alle Entfernungen mit \texttt{d[i] = 0}. Berechne mit \textsc{Bellman-Ford} kürzeste Entfernungen.
	Falls es einen negativen Zyklus gibt, abbrechen.
Sonst ändere die Gewichte von allen Kanten \texttt{(u,v)} im ursprünglichen Graphen zu \texttt{d[u]+w[u,v]-d[v]}.
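Skizze (nicht Teil des Repos) für die Potentiale: \textsc{Bellman-Ford} auf einer Kantenliste mit Start \texttt{d[i] = 0} (entspricht einer virtuellen Quelle mit $0$-Kanten zu allen Knoten). Danach ist $w'(u,v)=d[u]+w(u,v)-d[v]\geq 0$, und \textsc{Dijkstra} von jedem Knoten liefert $\mathrm{dist}(u,v)=\mathrm{dist}'(u,v)-d[u]+d[v]$.
// Skizze: Potentiale d[] für das Reweighting; Kanten als {u, v, w}.
// Rückgabe false <=> negativer Zyklus.
bool johnsonPotentials(int n, const vector<array<ll, 3>>& edges, vector<ll>& d) {
	d.assign(n, 0);
	for (int it = 0; it < n; it++) {
		bool change = false;
		for (auto [u, v, w] : edges)
			if (d[u] + w < d[v]) { d[v] = d[u] + w; change = true; }
		if (!change) return true; // ohne negativen Zyklus nach <= n-1 Runden fertig
	}
	return false; // n-te Runde ändert noch etwas
}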
@@ -186,8 +183,8 @@ [X/G] = \frac{1}{\vert G \vert} \sum_{g \in G} m^{\#(g)}
\]
- \item \textbf{Verteilung von Primzahlen:}
- Für alle $n \in \mathbb{N}$ gilt: Ex existiert eine Primzahl $p$ mit $n \leq p \leq 2n$.
+ \item \textbf{\textsc{Bertrand}sches Postulat:}
+	Für alle $n \geq 1$ gilt: Es existiert eine Primzahl $p$ mit $n < p \leq 2n$.
\item \textbf{Satz von \textsc{Kirchhoff} (Anzahl Spannbäume):}
Sei $G$ ein zusammenhängender, ungerichteter Graph evtl. mit Mehrfachkanten.
@@ -203,7 +200,7 @@ Das Ergebnis ist die Anzahl an gerichteten Spannbäumen mit Wurzel $k$,
sodass jeder Knoten einen Pfad zu $k$ hat.
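Skizze (nicht Teil des Repos): Determinante eines $(n{-}1)\times(n{-}1)$-Minors der \textsc{Laplace}-Matrix, hier naiv mit \texttt{long double}; für exakte Ergebnisse besser modular rechnen. \texttt{adj[i][j]} ist die Anzahl der Kanten zwischen $i$ und $j$, keine Schleifen.
// Skizze: Anzahl Spannbäume (Kirchhoff), Gauß auf long double, O(n^3).
double spanningTrees(const vector<vector<int>>& adj) { // adj[i][i] == 0
	int n = ssize(adj);
	vector<vector<long double>> L(n, vector<long double>(n));
	for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) {
		L[i][j] -= adj[i][j]; // -Adjazenz
		L[i][i] += adj[i][j]; // Grad auf der Diagonale
	}
	long double det = 1;
	for (int i = 0; i + 1 < n; i++) { // letzte Zeile/Spalte streichen
		int piv = i;
		for (int j = i + 1; j + 1 < n; j++)
			if (fabsl(L[j][i]) > fabsl(L[piv][i])) piv = j;
		if (fabsl(L[piv][i]) < 1e-9) return 0;
		if (piv != i) { swap(L[piv], L[i]); det = -det; }
		det *= L[i][i];
		for (int j = i + 1; j + 1 < n; j++) {
			long double f = L[j][i] / L[i][i];
			for (int k = i; k + 1 < n; k++) L[j][k] -= f * L[i][k];
		}
	}
	return (double)det;
}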
- \item \textbf{\textsc{Dilworths}-Theorem:}
+ \item \textbf{\textsc{Dilworth}'s Theorem:}
Sei $S$ eine Menge und $\leq$ eine partielle Ordnung ($S$ ist ein Poset).
Eine \emph{Kette} ist eine Teilmenge $\{x_1,\ldots,x_n\}$ mit $x_1 \leq \ldots \leq x_n$.
Eine \emph{Partition} ist eine Menge von Ketten, sodass jedes $s \in S$ in genau einer Kette ist.
@@ -222,13 +219,13 @@ Ersetze $u_x, v_x$ durch $x$ und erhalte so minimales Vertexcover vom Poset.
Das Komplement davon ist eine maximale Antikette.
- \item \textbf{\textsc{Turan}'s-Theorem:}
+ \item \textbf{\textsc{Tur\'an}'s Theorem:}
	Die maximale Anzahl an Kanten in einem Graphen mit $n$ Knoten, der keine Clique der Größe $x+1$ enthält, ist:
\begin{align*}
ext(n, K_{x+1}) &= \binom{n}{2} - \left[\left(x - (n \bmod x)\right) \cdot \binom{\floor{\frac{n}{x}}}{2} + \left(n\bmod x\right) \cdot \binom{\ceil{\frac{n}{x}}}{2}\right]
\end{align*}
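Skizze (nicht Teil des Repos): die Formel direkt ausgewertet (Kantenzahl des \textsc{Tur\'an}-Graphen).
// Skizze: maximale Kantenzahl ohne K_{x+1}, 1 <= x <= n.
ll turan(ll n, ll x) {
	ll q = n / x, r = n % x;
	return n * (n - 1) / 2 - ((x - r) * (q * (q - 1) / 2) + r * ((q + 1) * q / 2));
}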
- \item \textbf{\textsc{Euler}'s-Polyedersatz:}
+ \item \textbf{\textsc{Euler}scher Polyedersatz:}
In planaren Graphen gilt $n-m+f-c=1$.
\item \textbf{\textsc{Pythagoreische Tripel}:}
@@ -325,3 +322,5 @@ \item Unsicher bei benutzten STL-Funktionen?
\end{itemize}
\end{itemize}
+
+%\input{other/simd}
diff --git a/content/other/pbs.cpp b/content/other/pbs.cpp index f4db2fd..e6bfeac 100644 --- a/content/other/pbs.cpp +++ b/content/other/pbs.cpp @@ -7,7 +7,7 @@ while (true) { focus.emplace_back((low[i] + high[i]) / 2, i); }} if (focus.empty()) break; - sort(all(focus)); + ranges::sort(focus); // reset simulation for (int step = 0; auto [mid, i] : focus) { diff --git a/content/other/sos.cpp b/content/other/sos.cpp deleted file mode 100644 index 01bc44c..0000000 --- a/content/other/sos.cpp +++ /dev/null @@ -1,6 +0,0 @@ -vector<ll> res(in); -for (int i = 1; i < sz(res); i *= 2) { - for (int mask = 0; mask < sz(res); mask++){ - if (mask & i) { - res[mask] += res[mask ^ i]; -}}} diff --git a/content/other/timed.cpp b/content/other/timed.cpp index b3ed4ef..a3ede29 100644 --- a/content/other/timed.cpp +++ b/content/other/timed.cpp @@ -1,3 +1,3 @@ int times = clock(); //run for 900ms -while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) {...} +while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) { ... } diff --git a/content/string/ahoCorasick.cpp b/content/string/ahoCorasick.cpp index 390d16d..d738961 100644 --- a/content/string/ahoCorasick.cpp +++ b/content/string/ahoCorasick.cpp @@ -4,7 +4,8 @@ struct AhoCorasick { int suffix = 0, ch, cnt = 0; array<int, ALPHABET_SIZE> nxt = {}; - vert(int p, int c) : suffix(-p), ch(c) {fill(all(nxt), -1);} + vert(int p, int c): + suffix(-p), ch(c) { ranges::fill(nxt, -1); } }; vector<vert> aho = {{0, -1}}; @@ -13,7 +14,7 @@ struct AhoCorasick { for (auto c : s) { int idx = c - OFFSET; if (aho[v].nxt[idx] == -1) { - aho[v].nxt[idx] = sz(aho); + aho[v].nxt[idx] = ssize(aho); aho.emplace_back(v, idx); } v = aho[v].nxt[idx]; @@ -37,9 +38,9 @@ struct AhoCorasick { vector<vector<int>> adj; vector<ll> dp; void buildGraph() { - adj.resize(sz(aho)); - dp.assign(sz(aho), 0); - for (int i = 1; i < sz(aho); i++) { + adj.resize(ssize(aho)); + dp.assign(ssize(aho), 0); + for (int i = 1; i < ssize(aho); i++) { adj[getSuffix(i)].push_back(i); }} diff --git a/content/string/deBruijn.cpp b/content/string/deBruijn.cpp index e829137..545dde7 100644 --- a/content/string/deBruijn.cpp +++ b/content/string/deBruijn.cpp @@ -1,7 +1,7 @@ string deBruijn(int n, char mi = '0', char ma = '1') { string res, c(1, mi); do { - if (n % sz(c) == 0) res += c; + if (n % ssize(c) == 0) res += c; } while(next(c, n, mi, ma)); return res; } diff --git a/content/string/duval.cpp b/content/string/duval.cpp index 253bae1..de94ebd 100644 --- a/content/string/duval.cpp +++ b/content/string/duval.cpp @@ -1,8 +1,8 @@ vector<pair<int, int>> duval(const string& s) { vector<pair<int, int>> res; - for (int i = 0; i < sz(s);) { + for (int i = 0; i < ssize(s);) { int j = i + 1, k = i; - for (; j < sz(s) && s[k] <= s[j]; j++) { + for (; j < ssize(s) && s[k] <= s[j]; j++) { if (s[k] < s[j]) k = i; else k++; } @@ -15,5 +15,5 @@ vector<pair<int, int>> duval(const string& s) { int minrotation(const string& s) { auto parts = duval(s+s); for (auto [l, r] : parts) { - if (r >= sz(s)) return l; + if (r >= ssize(s)) return l; }} diff --git a/content/string/kmp.cpp b/content/string/kmp.cpp index 421479e..a354aa7 100644 --- a/content/string/kmp.cpp +++ b/content/string/kmp.cpp @@ -1,7 +1,7 @@ vector<int> kmpPreprocessing(const string& sub) { - vector<int> b(sz(sub) + 1); + vector<int> b(ssize(sub) + 1); b[0] = -1; - for (int i = 0, j = -1; i < sz(sub);) { + for (int i = 0, j = -1; i < ssize(sub);) { while (j >= 0 && sub[i] != sub[j]) j = b[j]; b[++i] = ++j; } @@ -9,10 +9,10 @@ vector<int> kmpPreprocessing(const string& sub) { } 
vector<int> kmpSearch(const string& s, const string& sub) { vector<int> result, pre = kmpPreprocessing(sub); - for (int i = 0, j = 0; i < sz(s);) { + for (int i = 0, j = 0; i < ssize(s);) { while (j >= 0 && s[i] != sub[j]) j = pre[j]; i++; j++; - if (j == sz(sub)) { + if (j == ssize(sub)) { result.push_back(i - j); j = pre[j]; }} diff --git a/content/string/longestCommonSubsequence.cpp b/content/string/longestCommonSubsequence.cpp index 6c9ea44..14ca62c 100644 --- a/content/string/longestCommonSubsequence.cpp +++ b/content/string/longestCommonSubsequence.cpp @@ -1,12 +1,12 @@ string lcss(const string& a, const string& b) { - vector<vector<int>> m(sz(a) + 1, vector<int>(sz(b) + 1)); - for (int i = sz(a) - 1; i >= 0; i--) { - for (int j = sz(b) - 1; j >= 0; j--) { + vector<vector<int>> m(ssize(a) + 1, vector<int>(ssize(b) + 1)); + for (int i = ssize(a) - 1; i >= 0; i--) { + for (int j = ssize(b) - 1; j >= 0; j--) { if (a[i] == b[j]) m[i][j] = 1 + m[i+1][j+1]; else m[i][j] = max(m[i+1][j], m[i][j+1]); }} // Für die Länge: return m[0][0]; string res; - for (int j = 0, i = 0; j < sz(b) && i < sz(a);) { + for (int j = 0, i = 0; j < ssize(b) && i < ssize(a);) { if (a[i] == b[j]) res += a[i++], j++; else if (m[i][j+1] > m[i+1][j]) j++; else i++; diff --git a/content/string/lyndon.cpp b/content/string/lyndon.cpp index e44379b..cb477d4 100644 --- a/content/string/lyndon.cpp +++ b/content/string/lyndon.cpp @@ -1,5 +1,5 @@ bool next(string& s, int maxLen, char mi = '0', char ma = '1') { - for (int i = sz(s), j = sz(s); i < maxLen; i++) + for (int i = ssize(s), j = ssize(s); i < maxLen; i++) s.push_back(s[i % j]); while(!s.empty() && s.back() == ma) s.pop_back(); if (s.empty()) { diff --git a/content/string/manacher.cpp b/content/string/manacher.cpp index 112bd55..9fa2991 100644 --- a/content/string/manacher.cpp +++ b/content/string/manacher.cpp @@ -1,9 +1,9 @@ vector<int> manacher(const string& t) { //transforms "aa" to ".a.a." to find even length palindromes - string s(sz(t) * 2 + 1, '.'); - for (int i = 0; i < sz(t); i++) s[2 * i + 1] = t[i]; + string s(ssize(t) * 2 + 1, '.'); + for (int i = 0; i < ssize(t); i++) s[2 * i + 1] = t[i]; - int mid = 0, r = 0, n = sz(s); + int mid = 0, r = 0, n = ssize(s); vector<int> pal(n); for (int i = 1; i < n - 1; i++) { if (r > i) pal[i] = min(r - i, pal[2 * mid - i]); diff --git a/content/string/rollingHash.cpp b/content/string/rollingHash.cpp index 6e914aa..1157cb7 100644 --- a/content/string/rollingHash.cpp +++ b/content/string/rollingHash.cpp @@ -14,5 +14,5 @@ struct Hash { return (pref[r] - mul(power[r-l], pref[l]) + M) % M; } - static ll mul(__int128 a, ll b) {return a * b % M;} + static ll mul(__int128 a, ll b) { return a * b % M; } }; diff --git a/content/string/rollingHashCf.cpp b/content/string/rollingHashCf.cpp index 84b2e4e..c08a9d3 100644 --- a/content/string/rollingHashCf.cpp +++ b/content/string/rollingHashCf.cpp @@ -13,5 +13,5 @@ struct Hash { return (pref[r] - mul(power[r-l], pref[l]) + M) % M; } - static ll mul(__int128 a, ll b) {return a * b % M;} + static ll mul(__int128 a, ll b) { return a * b % M; } }; diff --git a/content/string/string.tex b/content/string/string.tex index bedabfb..0e482bf 100644 --- a/content/string/string.tex +++ b/content/string/string.tex @@ -63,21 +63,21 @@ \end{algorithm} \clearpage -\begin{algorithm}{Lyndon und De-Bruijn} +\begin{algorithm}{\textsc{Lyndon} und \textsc{De-Bruijn}} \begin{itemize} - \item \textbf{Lyndon-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen. 
- \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von Lyndon-Worten zerlegt werden. - \item Für Lyndon-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein Lyndon-Wort ist. + \item \textbf{\textsc{Lyndon}-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen. + \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von \textsc{Lyndon}-Worten zerlegt werden. + \item Für \textsc{Lyndon}-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein \textsc{Lyndon}-Wort ist. \end{itemize} \begin{methods} - \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes Lyndon-Wort}{n} - \method{duval}{zerlegt $s$ in Lyndon-Worte}{n} + \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes \textsc{Lyndon}-Wort}{n} + \method{duval}{zerlegt $s$ in \textsc{Lyndon}-Worte}{n} \method{minrotation}{berechnet kleinste Rotation von $s$}{n} \end{methods} \sourcecode{string/lyndon.cpp} \sourcecode{string/duval.cpp} \begin{itemize} - \item \textbf{De-Bruijn-Sequenze $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird. + \item \textbf{\textsc{De-Bruijn}-Sequenz $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird. \item es gibt $\frac{(k!)^{k^{n-1}}}{k^{n}}$ verschiedene $B(\Sigma, n)$ \item $B(\Sigma, n)$ hat Länge $\abs{\Sigma}^n$ \end{itemize} diff --git a/content/string/suffixArray.cpp b/content/string/suffixArray.cpp index 8b698d2..65bbb38 100644 --- a/content/string/suffixArray.cpp +++ b/content/string/suffixArray.cpp @@ -4,22 +4,22 @@ struct SuffixArray { vector<int> SA, LCP; vector<vector<int>> P; - SuffixArray(const string& s) : n(sz(s)), SA(n), LCP(n), + SuffixArray(const string& s) : n(ssize(s)), SA(n), LCP(n), P(__lg(2 * n - 1) + 1, vector<int>(n)) { - P[0].assign(all(s)); - iota(all(SA), 0); - sort(all(SA), [&](int a, int b) {return s[a] < s[b];}); + P[0].assign(begin(s), end(s)); + iota(begin(SA), end(SA), 0); + ranges::sort(SA, {}, [&](int x) { return s[x]; }); vector<int> x(n); for (int k = 1, c = 1; c < n; k++, c *= 2) { - iota(all(x), n - c); + iota(begin(x), end(x), n - c); for (int ptr = c; int i : SA) if (i >= c) x[ptr++] = i - c; vector<int> cnt(k == 1 ? MAX_CHAR : n); for (int i : P[k-1]) cnt[i]++; - partial_sum(all(cnt), begin(cnt)); + partial_sum(begin(cnt), end(cnt), begin(cnt)); for (int i : x | views::reverse) SA[--cnt[P[k-1][i]]] = i; - auto p = [&](int i) {return i < n ? P[k-1][i] : -1;}; + auto p = [&](int i) { return i < n ? 
P[k-1][i] : -1; }; for (int i = 1; i < n; i++) { int a = SA[i-1], b = SA[i]; P[k][b] = P[k][a] + (p(a) != p(b) || p(a+c) != p(b+c)); @@ -27,10 +27,11 @@ struct SuffixArray { for (int i = 1; i < n; i++) LCP[i] = lcp(SA[i-1], SA[i]); } - int lcp(int x, int y) {//x & y are text-indices, not SA-indices + // x & y are text-indices, not SA-indices + int lcp(int x, int y) { if (x == y) return n - x; int res = 0; - for (int i = sz(P) - 1; i >= 0 && max(x, y) + res < n; i--) { + for (int i = ssize(P)-1; i >= 0 && max(x, y) + res < n; i--){ if (P[i][x + res] == P[i][y + res]) res |= 1 << i; } return res; diff --git a/content/string/suffixAutomaton.cpp b/content/string/suffixAutomaton.cpp index 9a68cb3..f9aa80b 100644 --- a/content/string/suffixAutomaton.cpp +++ b/content/string/suffixAutomaton.cpp @@ -4,20 +4,20 @@ struct SuffixAutomaton { struct State { int len, link = -1; array<int, ALPHABET_SIZE> nxt; // map if large Alphabet - State(int l) : len(l) {fill(all(nxt), -1);} + State(int l): len(l) { ranges::fill(nxt, -1); } }; vector<State> st = {State(0)}; int cur = 0; SuffixAutomaton(const string& s) { - st.reserve(2 * sz(s)); + st.reserve(2 * ssize(s)); for (auto c : s) extend(c - OFFSET); } void extend(int c) { int p = cur; - cur = sz(st); + cur = ssize(st); st.emplace_back(st[p].len + 1); for (; p != -1 && st[p].nxt[c] < 0; p = st[p].link) { st[p].nxt[c] = cur; @@ -33,9 +33,9 @@ struct SuffixAutomaton { st.back().link = st[q].link; st.back().nxt = st[q].nxt; for (; p != -1 && st[p].nxt[c] == q; p = st[p].link) { - st[p].nxt[c] = sz(st) - 1; + st[p].nxt[c] = ssize(st) - 1; } - st[q].link = st[cur].link = sz(st) - 1; + st[q].link = st[cur].link = ssize(st) - 1; }}} vector<int> calculateTerminals() { @@ -49,7 +49,7 @@ struct SuffixAutomaton { // Pair with start index (in t) and length of LCS. pair<int, int> longestCommonSubstring(const string& t) { int v = 0, l = 0, best = 0, bestp = -1; - for (int i = 0; i < sz(t); i++) { + for (int i = 0; i < ssize(t); i++) { int c = t[i] - OFFSET; while (v > 0 && st[v].nxt[c] < 0) { v = st[v].link; diff --git a/content/string/suffixTree.cpp b/content/string/suffixTree.cpp index 7112f39..6362c3e 100644 --- a/content/string/suffixTree.cpp +++ b/content/string/suffixTree.cpp @@ -11,12 +11,12 @@ struct SuffixTree { SuffixTree(const string& s_) : s(s_) { needsSuffix = remainder = curVert = curEdge = curLen = 0; pos = -1; - for (int i = 0; i < sz(s); i++) extend(); + for (int i = 0; i < ssize(s); i++) extend(); } int newVert(int start, int end) { tree.push_back({start, end, 0, {}}); - return sz(tree) - 1; + return ssize(tree) - 1; } void addSuffixLink(int vert) { @@ -42,7 +42,7 @@ struct SuffixTree { while (remainder) { if (curLen == 0) curEdge = pos; if (!tree[curVert].nxt.count(s[curEdge])) { - int leaf = newVert(pos, sz(s)); + int leaf = newVert(pos, ssize(s)); tree[curVert].nxt[s[curEdge]] = leaf; addSuffixLink(curVert); } else { @@ -56,7 +56,7 @@ struct SuffixTree { int split = newVert(tree[nxt].start, tree[nxt].start + curLen); tree[curVert].nxt[s[curEdge]] = split; - int leaf = newVert(pos, sz(s)); + int leaf = newVert(pos, ssize(s)); tree[split].nxt[s[pos]] = leaf; tree[nxt].start += curLen; tree[split].nxt[s[tree[nxt].start]] = nxt; @@ -69,4 +69,4 @@ struct SuffixTree { } else { curVert = tree[curVert].suf ? tree[curVert].suf : 0; }}} -};
\ No newline at end of file +}; diff --git a/content/string/trie.cpp b/content/string/trie.cpp index 4e9f615..64d7beb 100644 --- a/content/string/trie.cpp +++ b/content/string/trie.cpp @@ -3,7 +3,7 @@ constexpr int ALPHABET_SIZE = 2; struct node { int words, ends; array<int, ALPHABET_SIZE> nxt; - node() : words(0), ends(0) {fill(all(nxt), -1);} + node(): words(0), ends(0) { ranges::fill(nxt, -1); } }; vector<node> trie = {node()}; @@ -13,7 +13,7 @@ int traverse(const vector<int>& word, int x) { if (trie[id].words == 0 && x <= 0) return -1; trie[id].words += x; if (trie[id].nxt[c] < 0 && x > 0) { - trie[id].nxt[c] = sz(trie); + trie[id].nxt[c] = ssize(trie); trie.emplace_back(); } id = trie[id].nxt[c]; diff --git a/content/string/z.cpp b/content/string/z.cpp index 069fa38..0d8cafb 100644 --- a/content/string/z.cpp +++ b/content/string/z.cpp @@ -1,5 +1,5 @@ vector<int> Z(const string& s) { - int n = sz(s); + int n = ssize(s); vector<int> z(n); for (int i = 1, x = 0; i < n; i++) { z[i] = max(0, min(z[i - x], x + z[x] - i)); diff --git a/content/tcr.tex b/content/tcr.tex index 6d849d5..46a9a6a 100644 --- a/content/tcr.tex +++ b/content/tcr.tex @@ -6,12 +6,14 @@ ]{scrartcl} % General information. -\newcommand{\teamname}{Kindergarten Timelimit} +\newcommand{\teamname}{Infinite Loopers} \newcommand{\university}{Karlsruhe Institute of Technology} % Options \newif\ifoptional -%\optionaltrue +\ifdefined\OPTIONAL + \optionaltrue +\fi % Font encoding. \usepackage[T1]{fontenc} @@ -44,6 +46,7 @@ % Content. \begin{multicols*}{3} + \raggedcolumns \input{datastructures/datastructures} \input{graph/graph} \input{geometry/geometry} @@ -54,12 +57,6 @@ \input{other/other} \input{template/template} \clearpage - \ifodd\value{page} - \else - \null - \thispagestyle{empty} - \clearpage - \fi \input{tests/test} \end{multicols*} \end{document} diff --git a/content/template/template.cpp b/content/template/template.cpp index 7430d23..7c92f09 100644 --- a/content/template/template.cpp +++ b/content/template/template.cpp @@ -1,17 +1,15 @@ #include <bits/stdc++.h> using namespace std; -#define tsolve int t; cin >> t; while(t--) solve -#define all(x) ::begin(x), ::end(x) -#define sz(x) (ll)::size(x) - +using ii = pair<int, int>; +using vi = vector<int>; using ll = long long; using ld = long double; -void solve() {} +void solve() { +} int main() { - cin.tie(0)->sync_with_stdio(false); - cout << setprecision(16); + cin.tie(0)->sync_with_stdio(0); solve(); } diff --git a/tcr.pdf b/tcr.pdf Binary files differdeleted file mode 100644 index 4b8eab4..0000000 --- a/tcr.pdf +++ /dev/null diff --git a/test/GNUmakefile b/test/GNUmakefile new file mode 100644 index 0000000..cc1b4f5 --- /dev/null +++ b/test/GNUmakefile @@ -0,0 +1,41 @@ + +TESTS = $(basename $(shell find . -path ./awk -prune -o -type f -name '*.cpp' -print)) +AWK = $(basename $(shell find . 
-type f -name '*.awk')) +CXX = g++ -std=gnu++20 -I awk/ -I ../content/ -O2 -Wall -Wextra -Wshadow -Werror +SAN = -fsanitize=address,undefined -DSANITIZE +TIMEOUT = 300 + +test: $(TESTS:=.ok) $(TESTS:=.san.ok) + +missing: + @find ../content -name '*.cpp' | sed 's|^../content/||' \ + | while read -r f ; do [ -e "$$f" ] || echo "$$f" ; done \ + | sort > missing.tmp + @sort missing.ignore | comm -3 missing.tmp - + @rm missing.tmp + +clean: + rm -f $(TESTS:=.test) $(TESTS:=.ok) $(TESTS:=.san.ok) $(TESTS:=.d) + rm -rf awk/ + +%.ok: %.test + timeout --foreground --verbose $(TIMEOUT) prlimit -s$$((1<<32)) ./$< + @touch $@ + +%.test: %.cpp + $(CXX) -o $@ $< + +%.san.test: %.cpp + $(CXX) $(SAN) -o $@ $< + +awk/%: %.awk ../content/% + @mkdir -p $(dir $@) + awk -f $*.awk < ../content/$* > $@ + +%.d: %.cpp $(addprefix awk/,$(AWK)) + $(CXX) -M -MP -MT '$*.test $*.san.test $*.d' -MF $@ $< + +.PHONY: test clean +.SECONDARY: $(TESTS:=.test) $(addprefix awk/,$(AWK)) + +include $(TESTS:=.d) diff --git a/test/datastructures/LCT.cpp b/test/datastructures/LCT.cpp index a1e37eb..e120b6e 100644 --- a/test/datastructures/LCT.cpp +++ b/test/datastructures/LCT.cpp @@ -73,13 +73,13 @@ struct Naive { } }; dfs_comp(dfs_comp, x); - return seen[Random::integer<int>(sz(seen))]; + return seen[Random::integer<int>(ssize(seen))]; } int randomAdj(int x) { if (adj[x].empty()) return -1; - vector<int> seen(all(adj[x])); - return seen[Random::integer<int>(sz(seen))]; + vector<int> seen(begin(adj[x]), end(adj[x])); + return seen[Random::integer<int>(ssize(seen))]; } }; @@ -179,7 +179,7 @@ void performance_test() { int a = Random::integer<int>(0, N); int b = Random::integer<int>(0, N); ll w = Random::integer<ll>(-1000, 1000); - + t.start(); if (!lct.connected(&lct.nodes[a], &lct.nodes[b])) { lct.link(&lct.nodes[a], &lct.nodes[b]); diff --git a/test/datastructures/dynamicConvexHull.cpp b/test/datastructures/dynamicConvexHull.cpp index 335dbae..cc57d73 100644 --- a/test/datastructures/dynamicConvexHull.cpp +++ b/test/datastructures/dynamicConvexHull.cpp @@ -29,7 +29,7 @@ void stress_test(ll range) { ll got = hd.query(x); ll expected = naive[0](x); - for (auto l : naive) expected = max(expected, l(x)); + for (auto l : naive) expected = min(expected, l(x)); if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; queries++; @@ -49,7 +49,7 @@ void performance_test() { ll m = Random::integer<ll>(-1'000'000'000, 1'000'000'000); ll c = Random::integer<ll>(-1'000'000'000, 1'000'000'000); ll x = Random::integer<ll>(-1'000'000'000, 1'000'000'000); - + t.start(); hd.add(m, c); hash += hd.query(x); diff --git a/test/datastructures/dynamicConvexHull.lichao.cpp b/test/datastructures/dynamicConvexHull.lichao.cpp index d50ca60..f692e92 100644 --- a/test/datastructures/dynamicConvexHull.lichao.cpp +++ b/test/datastructures/dynamicConvexHull.lichao.cpp @@ -8,7 +8,7 @@ void stress_test(ll range) { for (int tries = 0; tries < 1000; tries++) { int n = Random::integer<int>(1, 100); xs = Random::distinct(n, -range, range); - sort(all(xs)); + ranges::sort(xs); HullDynamic hd; Lichao lichao; @@ -16,11 +16,11 @@ void stress_test(ll range) { ll m = Random::integer<ll>(-range, range); ll c = Random::integer<ll>(-range, range); hd.add(m, c); - lichao.insert({-m, -c}); + lichao.insert({m, c}); for (ll x : xs) { ll gotA = hd.query(x); - ll gotB = -lichao.query(x); + ll gotB = lichao.query(x); if (gotA != gotB) cerr << "gotA: " << gotA << ", gotB: " << gotB << FAIL; queries++; diff --git a/test/datastructures/fenwickTree.cpp 
b/test/datastructures/fenwickTree.cpp index 62e6392..f3c0274 100644 --- a/test/datastructures/fenwickTree.cpp +++ b/test/datastructures/fenwickTree.cpp @@ -23,7 +23,7 @@ void stress_test() { int i = Random::integer<int>(0, n); ll got = prefix_sum(i); ll expected = 0; - for (int j = 0; j <= i; j++) expected += naive[j]; + for (int j = 0; j < i; j++) expected += naive[j]; if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; } } @@ -42,7 +42,7 @@ void performance_test() { int i = Random::integer<int>(0, N); int j = Random::integer<int>(0, N); ll x = Random::integer<ll>(-1000, 1000); - + t.start(); update(i, x); hash ^= prefix_sum(j); diff --git a/test/datastructures/fenwickTree2.cpp b/test/datastructures/fenwickTree2.cpp index 16caa1d..180bd24 100644 --- a/test/datastructures/fenwickTree2.cpp +++ b/test/datastructures/fenwickTree2.cpp @@ -23,7 +23,7 @@ void stress_test() { int i = Random::integer<int>(0, n); ll got = prefix_sum(i); ll expected = 0; - for (int j = 0; j <= i; j++) expected += naive[j]; + for (int j = 0; j < i; j++) expected += naive[j]; if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; } } @@ -44,7 +44,7 @@ void performance_test() { int j = Random::integer<int>(0, N); int k = Random::integer<int>(0, N); ll x = Random::integer<ll>(-1000, 1000); - + t.start(); update(i, j, x); hash ^= prefix_sum(k); diff --git a/test/datastructures/lazyPropagation.cpp b/test/datastructures/lazyPropagation.cpp index 22b75ba..2e7431b 100644 --- a/test/datastructures/lazyPropagation.cpp +++ b/test/datastructures/lazyPropagation.cpp @@ -34,6 +34,39 @@ void stress_test() { cerr << "tested random queries: " << queries << endl; } +void stress_test_binary_search() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer<int>(10, 100); + vector<ll> naive = Random::integers<ll>(n, 0, 1000); + SegTree tree(naive); + for (int operations = 0; operations < 1000; operations++) { + { + int l = Random::integer<int>(0, n + 1); + int r = Random::integer<int>(0, n + 1); + //if (l > r) swap(l, r); + ll x = Random::integer<ll>(0, 1000); + tree.update(l, r, x); + for (int j = l; j < r; j++) naive[j] = x; + } + { + queries++; + int l = Random::integer<int>(0, n + 1); + int r = Random::integer<int>(0, n + 1); + ll x = Random::integer<ll>(0, 10000); + //if (l > r) swap(l, r); + int got = tree.binary_search(l, r, [x](ll v) { return v >= x; }); + ll sum; + int j; + for (j = l, sum = 0; j < r && sum < x; j++) sum += naive[j]; + int expected = sum >= x ? 
j : -1; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + } + cerr << "tested random binary searches: " << queries << endl; +} + void performance_test() { timer t; t.start(); @@ -45,7 +78,7 @@ void performance_test() { auto [l1, r1] = Random::pair<int>(0, N + 1); auto [l2, r2] = Random::pair<int>(0, N + 1); ll x1 = Random::integer<ll>(-1000, 1000); - + t.start(); tree.update(l1, r1, x1); hash ^= tree.query(l2, r2); @@ -55,7 +88,33 @@ void performance_test() { cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } +void performance_test_binary_search() { + timer t; + t.start(); + vector<ll> tmp(N); + SegTree tree(tmp); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + auto [l1, r1] = Random::pair<int>(0, N + 1); + auto [l2, r2] = Random::pair<int>(0, N + 1); + ll x1 = Random::integer<ll>(0, 1000); + ll x2 = Random::integer<ll>(0, 1000 * N); + + t.start(); + tree.update(l1, r1, x1); + hash ^= tree.binary_search(l2, r2, [x2](ll v) { return v >= x2; }); + t.stop(); + } + if (t.time > 2000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + int main() { stress_test(); - if (!sanitize) performance_test(); + stress_test_binary_search(); + if (!sanitize) { + performance_test(); + performance_test_binary_search(); + } } diff --git a/test/datastructures/lichao.cpp b/test/datastructures/lichao.cpp index 9cf770f..30d5b58 100644 --- a/test/datastructures/lichao.cpp +++ b/test/datastructures/lichao.cpp @@ -7,7 +7,7 @@ void stress_test(ll range) { for (int tries = 0; tries < 1000; tries++) { int n = Random::integer<int>(1, 100); xs = Random::distinct<ll>(n, -range, range); - sort(all(xs)); + ranges::sort(xs); vector<ll> naive(n, INF); Lichao tree; @@ -42,7 +42,7 @@ constexpr int N = 200'000; void performance_test() { timer t; xs = Random::distinct<ll>(N, -1'000'000'000, 1'000'000'000); - sort(all(xs)); + ranges::sort(xs); t.start(); Lichao tree; diff --git a/test/datastructures/monotonicConvexHull.cpp b/test/datastructures/monotonicConvexHull.cpp index 9490d7e..1c147e3 100644 --- a/test/datastructures/monotonicConvexHull.cpp +++ b/test/datastructures/monotonicConvexHull.cpp @@ -1,7 +1,5 @@ #include "../util.h" -struct MCH { - #include <datastructures/monotonicConvexHull.cpp> -}; +#include <datastructures/monotonicConvexHull.cpp> struct Line { ll m, c; @@ -14,12 +12,12 @@ void stress_test(ll range) { for (int tries = 0; tries < 1000; tries++) { int n = Random::integer<int>(1, 100); auto ms = Random::integers<ll>(n, -range, range); - sort(all(ms), greater<>{}); + ranges::sort(ms | views::reverse); auto cs = ms; for (int l = 0, r = 0; l < n;) { while (r < n && ms[l] == ms[r]) r++; auto tmp = Random::distinct<ll>(r - l, -range, range); - sort(all(tmp), greater<>{}); + ranges::sort(tmp | views::reverse); for (int c : tmp) { cs[l] = c; l++; @@ -27,12 +25,12 @@ void stress_test(ll range) { } auto xs = Random::integers<ll>(n*100, -range*n, range*n); - sort(all(xs)); + ranges::sort(xs); int i = 0; vector<Line> naive; - MCH mch; + Envelope mch; for (int k = 0; k < n; k++) { ll m = ms[k]; ll c = cs[k]; @@ -60,12 +58,12 @@ void stress_test_independent(ll range) { for (int tries = 0; tries < 1000; tries++) { int n = Random::integer<int>(1, 100); auto ms = Random::integers<ll>(n, -range, range); - sort(all(ms), greater<>{}); + ranges::sort(ms | views::reverse); auto cs = ms; for (int l = 0, r = 0; l < n;) { while (r < n && ms[l] 
== ms[r]) r++; auto tmp = Random::distinct<ll>(r - l, -range, range); - sort(all(tmp), greater<>{}); + ranges::sort(tmp | views::reverse); for (int c : tmp) { cs[l] = c; l++; @@ -74,7 +72,7 @@ void stress_test_independent(ll range) { vector<Line> naive; - MCH mch; + Envelope mch; for (int i = 0; i < n; i++) { ll m = ms[i]; ll c = cs[i]; @@ -83,7 +81,7 @@ void stress_test_independent(ll range) { naive.emplace_back(m, c); auto xs = Random::integers<ll>(100, -range, range); - sort(all(xs)); + ranges::sort(xs); auto tmp = mch; for (auto x : xs) { @@ -103,17 +101,17 @@ constexpr int N = 1'000'000; void performance_test() { timer t; auto ms = Random::distinct<ll>(N, -1'000'000'000, 1'000'000'000); - sort(all(ms), greater<>{}); + ranges::sort(ms | views::reverse); auto xs = Random::distinct<ll>(N, -1'000'000'000, 1'000'000'000); - sort(all(xs)); - MCH mch; + ranges::sort(xs); + Envelope mch; hash_t hash = 0; for (int operations = 0; operations < N; operations++) { ll c = Random::integer<ll>(-1'000'000'000, 1'000'000'000); ll m = ms[operations]; ll x = xs[operations]; - + t.start(); mch.add(m, c); hash += mch.query(x); diff --git a/test/datastructures/pbds.cpp b/test/datastructures/pbds.cpp deleted file mode 100644 index 9080332..0000000 --- a/test/datastructures/pbds.cpp +++ /dev/null @@ -1,11 +0,0 @@ -#include "../util.h" -#include <datastructures/pbds.cpp> - -int main() { - Tree<int> t1, t2; - swap(t1, t2); - hashSet<int> s1, s2; - swap(s1, s2); - hashMap<int, int> m1, m2; - swap(m1, m2); -}
\ No newline at end of file diff --git a/test/datastructures/persistentArray.cpp b/test/datastructures/persistentArray.cpp index 6712089..ef8e52b 100644 --- a/test/datastructures/persistentArray.cpp +++ b/test/datastructures/persistentArray.cpp @@ -24,19 +24,19 @@ void stress_test() { cur[j] = x; expected.emplace_back(t, cur); } else if (op <= 16) { - if (sz(expected) < 1) continue; - int j = Random::integer<int>(0, sz(expected)); + if (ssize(expected) < 1) continue; + int j = Random::integer<int>(0, ssize(expected)); for (int k = 0; k < m; k++) { if (got.get(k, expected[j].first) != expected[j].second[k]) cerr << "got: " << got.get(k, expected[j].first) << ", expected: " << expected[j].second[k] << FAIL; } } else { - if (sz(expected) < 1) continue; - int j = Random::integer<int>(0, sz(expected)); + if (ssize(expected) < 1) continue; + int j = Random::integer<int>(0, ssize(expected)); got.reset(expected[j].first); expected.resize(j + 1); cur = expected.back().second; } - + } queries += n; } diff --git a/test/datastructures/segmentTree.cpp b/test/datastructures/segmentTree.cpp index 39cf20f..166dfd2 100644 --- a/test/datastructures/segmentTree.cpp +++ b/test/datastructures/segmentTree.cpp @@ -47,7 +47,7 @@ void performance_test1() { int i = Random::integer<int>(0, N); auto [l, r] = Random::pair<int>(0, N + 1); ll x = Random::integer<ll>(-1000, 1000); - + t.start(); tree.update(i, x); hash ^= tree.query(l, r); @@ -68,7 +68,7 @@ void stress_test2() { vector<ll> naive(n); SegTree tree(naive); naive = Random::integers<ll>(n, -1000, 1000); - copy(all(naive), tree.tree.begin() + n); + ranges::copy(naive, tree.tree.begin() + n); for (int operations = 0; operations < 1000; operations++) { { int l = Random::integer<int>(0, n + 1); @@ -102,7 +102,7 @@ void performance_test2() { int i = Random::integer<int>(0, N); auto [l, r] = Random::pair<int>(0, N + 1); ll x = Random::integer<ll>(-1000, 1000); - + t.start(); tree.modify(l, r, x); hash ^= tree.query(i); diff --git a/test/datastructures/sparseTable.cpp b/test/datastructures/sparseTable.cpp index 9a7fac5..078f336 100644 --- a/test/datastructures/sparseTable.cpp +++ b/test/datastructures/sparseTable.cpp @@ -8,13 +8,13 @@ void stress_test() { int n = Random::integer<int>(1, 100); vector<ll> naive = Random::integers<ll>(n, -1000, 1000); SparseTable st; - st.init(&naive); + st.init(naive); for (int operations = 0; operations < 1000; operations++) { queries++; int l = Random::integer<int>(0, n+1); int r = Random::integer<int>(0, n+1); - ll got = st.queryIdempotent(l, r); + ll got = st.query(l, r); ll expected = r <= l ? 
-1 : l; for (int j = l; j < r; j++) { if (naive[j] < naive[expected]) expected = j; @@ -31,14 +31,14 @@ void performance_test() { vector<ll> naive = Random::integers<ll>(N, -1000, 1000); t.start(); SparseTable st; - st.init(&naive); + st.init(naive); t.stop(); hash_t hash = 0; for (int operations = 0; operations < N; operations++) { auto [l, r] = Random::pair<int>(0, N+1); - + t.start(); - hash += st.queryIdempotent(l, r); + hash += st.query(l, r); t.stop(); } if (t.time > 500) cerr << "too slow: " << t.time << FAIL; diff --git a/test/datastructures/sparseTableDisjoint.cpp b/test/datastructures/sparseTableDisjoint.cpp index 1147b42..d3f42ba 100644 --- a/test/datastructures/sparseTableDisjoint.cpp +++ b/test/datastructures/sparseTableDisjoint.cpp @@ -7,7 +7,7 @@ void stress_test() { int n = Random::integer<int>(1, 100); vector<ll> naive = Random::integers<ll>(n, -1000, 1000); DisjointST st; - st.init(&naive); + st.init(naive); for (int operations = 0; operations < 1000; operations++) { queries++; int l = Random::integer<int>(0, n+1); @@ -28,12 +28,12 @@ void performance_test() { vector<ll> naive = Random::integers<ll>(N, -1000, 1000); t.start(); DisjointST st; - st.init(&naive); + st.init(naive); t.stop(); hash_t hash = 0; for (int operations = 0; operations < N; operations++) { auto [l, r] = Random::pair<int>(0, N+1); - + t.start(); hash += st.query(l, r); t.stop(); diff --git a/test/datastructures/stlHashMap.cpp b/test/datastructures/stlHashMap.cpp deleted file mode 100644 index 77976fd..0000000 --- a/test/datastructures/stlHashMap.cpp +++ /dev/null @@ -1,4 +0,0 @@ -#include "../util.h" -#include <datastructures/stlHashMap.cpp> - -int main() {}
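For reference (not part of the patch): throughout these tests the old sz(...) macro is replaced by C++20 std::ssize, which returns a signed count and so behaves sanely in comparisons and subtractions. A small standalone illustration, with illustrative names only:

#include <cstdio>
#include <iterator>   // std::ssize
#include <vector>
using namespace std;

int main() {
    vector<int> xs = {1, 2, 3};
    // ssize(xs) is signed, so ssize(xs) - 5 is the expected -2,
    // whereas xs.size() - 5 wraps around to a huge unsigned value.
    long long a = ssize(xs) - 5;
    unsigned long long b = xs.size() - 5;
    printf("%lld %llu\n", a, b);
}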
\ No newline at end of file diff --git a/test/datastructures/stlPriorityQueue.cpp b/test/datastructures/stlPriorityQueue.cpp deleted file mode 100644 index 669f4d4..0000000 --- a/test/datastructures/stlPriorityQueue.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include "../util.h" -#include <datastructures/stlPriorityQueue.cpp> - -int main() { - test(); -}
\ No newline at end of file diff --git a/test/datastructures/stlPriorityQueue.cpp.awk b/test/datastructures/stlPriorityQueue.cpp.awk deleted file mode 100644 index 99d0fb9..0000000 --- a/test/datastructures/stlPriorityQueue.cpp.awk +++ /dev/null @@ -1,37 +0,0 @@ -/auto/ { - print "void test() {" - print "pQueue<ll> pq, pq2;" - print "pq.push(1);" - print "pq.push(5);" - print "pq.push(7);" - print "pq2.push(2);" - print "pq2.push(4);" - print "pq2.push(8);" -} -END { - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 8) cerr << \"error, got: \" << pq.top() << \", expected: 8\" << FAIL;" - print "pq.pop();" - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 7) cerr << \"error, got: \" << pq.top() << \", expected: 7\" << FAIL;" - print "pq.pop();" - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 6) cerr << \"error, got: \" << pq.top() << \", expected: 6\" << FAIL;" - print "pq.pop();" - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 5) cerr << \"error, got: \" << pq.top() << \", expected: 5\" << FAIL;" - print "pq.pop();" - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 4) cerr << \"error, got: \" << pq.top() << \", expected: 4\" << FAIL;" - print "pq.pop();" - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 2) cerr << \"error, got: \" << pq.top() << \", expected: 2\" << FAIL;" - print "pq.pop();" - print "if (pq.empty()) cerr << \"error: empty\" << FAIL;" - print "if (pq.top() != 1) cerr << \"error, got: \" << pq.top() << \", expected: 1\" << FAIL;" - print "pq.pop();" - print "if (!pq.empty()) cerr << \"error, got: \" << pq.top() << \", expected: empty\" << FAIL;" - print "cerr << \"testes example\" << endl;" - print "}" -} -{ print } diff --git a/test/datastructures/stlRope.cpp b/test/datastructures/stlRope.cpp index 669f4d4..7405e4e 100644 --- a/test/datastructures/stlRope.cpp +++ b/test/datastructures/stlRope.cpp @@ -1,6 +1,6 @@ #include "../util.h" -#include <datastructures/stlPriorityQueue.cpp> +#include <datastructures/stlRope.cpp> int main() { test(); -}
\ No newline at end of file +} diff --git a/test/datastructures/stlRope.cpp.awk b/test/datastructures/stlRope.cpp.awk index e19b8fd..df7c361 100644 --- a/test/datastructures/stlRope.cpp.awk +++ b/test/datastructures/stlRope.cpp.awk @@ -20,7 +20,7 @@ print "vector<int> got, expected = {0,1,6,2,3,4,5,7};" } END { - print " got.push_back(*it)" + print " got.push_back(*it);" print "if (got != expected) cerr << \"error\" << endl;" print "}" } diff --git a/test/datastructures/stlTree.cpp b/test/datastructures/stlTree.cpp deleted file mode 100644 index 7bacbee..0000000 --- a/test/datastructures/stlTree.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#include "../util.h" -#include <datastructures/stlTree.cpp> diff --git a/test/datastructures/treap.cpp b/test/datastructures/treap.cpp index 9a3527e..4f0fe03 100644 --- a/test/datastructures/treap.cpp +++ b/test/datastructures/treap.cpp @@ -26,14 +26,14 @@ void stress_test(int T, int n) { if (a.empty()) is_ins = true; if (is_ins) { - int ind = Random::integer<int>(0, (int)sz(a)+1); + int ind = Random::integer<int>(0, (int)ssize(a)+1); ll val = Random::integer((ll)-1e18, (ll)1e18+1); t.insert(ind, val); a.insert(a.begin() + ind, val); ins--; } else { - int ind = Random::integer<int>(0, (int)sz(a)); - int cnt = Random::integer<int>(1, 1 + min<int>({(int)sz(a)-ind, rem, (int)sqrt(n)})); + int ind = Random::integer<int>(0, (int)ssize(a)); + int cnt = Random::integer<int>(1, 1 + min<int>({(int)ssize(a)-ind, rem, (int)sqrt(n)})); t.remove(ind, cnt); a.erase(a.begin() + ind, a.begin() + ind + cnt); rem -= cnt; diff --git a/test/datastructures/unionFind.cpp b/test/datastructures/unionFind.cpp index 50ad50d..ced2355 100644 --- a/test/datastructures/unionFind.cpp +++ b/test/datastructures/unionFind.cpp @@ -1,8 +1,5 @@ #include "../util.h" -struct UF { - UF(int n) {init(n);} - #include <datastructures/unionFind.cpp> -}; +#include <datastructures/unionFind.cpp> struct Naive { vector<vector<int>> adj; @@ -28,15 +25,18 @@ struct Naive { } } - int findSet(int a) { + int find(int a) { int res = a; dfs(a, [&](int x){res = min(res, x);}); return res; } - void unionSets(int a, int b) { + bool link(int a, int b) { + bool linked = false; + dfs(a, [&](int x) { linked |= x == b; }); adj[a].push_back(b); adj[b].push_back(a); + return !linked; } int size(int a) { @@ -44,22 +44,38 @@ struct Naive { dfs(a, [&](int /**/){res++;}); return res; } + + int add() { + int idx = ssize(adj); + adj.emplace_back(); + seen.push_back(counter); + return idx; + } }; void stress_test() { ll queries = 0; for (int tries = 0; tries < 200; tries++) { int n = Random::integer<int>(1, 100); - UF uf(n); + UnionFind uf(n); Naive naive(n); - for (int i = 0; i < n; i++) { + int rounds = n; + for (int i = 0; i < rounds; i++) { for (int j = 0; j < 10; j++) { int a = Random::integer<int>(0, n); int b = Random::integer<int>(0, n); - uf.unionSets(a, b); - naive.unionSets(a, b); + auto got = uf.link(a, b); + auto expected = naive.link(a, b); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; } - UF tmp = uf; + { + auto got = uf.add(); + auto expected = naive.add(); + assert(expected == n); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + n++; + } + UnionFind tmp = uf; for (int j = 0; j < n; j++) { { auto got = tmp.size(j); @@ -69,8 +85,8 @@ void stress_test() { { int a = Random::integer<int>(0, n); int b = Random::integer<int>(0, n); - bool got = tmp.findSet(a) == tmp.findSet(b); - bool expected = naive.findSet(a) == naive.findSet(b); + bool got = 
tmp.find(a) == tmp.find(b); + bool expected = naive.find(a) == naive.find(b); if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; } } @@ -84,7 +100,7 @@ constexpr int N = 2'000'000; void performance_test() { timer t; t.start(); - UF uf(N); + UnionFind uf(N); t.stop(); hash_t hash = 0; for (int operations = 0; operations < N; operations++) { @@ -92,9 +108,9 @@ void performance_test() { int j = Random::integer<int>(0, N); int k = Random::integer<int>(0, N); int l = Random::integer<int>(0, N); - + t.start(); - uf.unionSets(i, j); + uf.link(i, j); hash += uf.size(k); hash += uf.size(l); t.stop(); diff --git a/test/datastructures/waveletTree.cpp b/test/datastructures/waveletTree.cpp index 4c51b60..06b3e03 100644 --- a/test/datastructures/waveletTree.cpp +++ b/test/datastructures/waveletTree.cpp @@ -20,7 +20,7 @@ void stress_test() { ll expected = -1; if (x >= 0 && l + x < r) { vector<ll> tmp(naive.begin() + l, naive.begin() + r); - std::sort(all(tmp)); + ranges::sort(tmp); expected = tmp[x]; } if (got != expected) { @@ -59,7 +59,7 @@ void performance_test() { auto [l2, r2] = Random::pair<int>(0, N + 1); int x1 = Random::integer<ll>(l1, r1 + 1); ll x2 = Random::integer<ll>(-1000, 1000); - + t.start(); hash ^= tree.kth(l1, r1, x1); hash ^= tree.countSmaller(l2, r2, x2); diff --git a/test/fuzz.sh b/test/fuzz.sh deleted file mode 100755 index c166506..0000000 --- a/test/fuzz.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -set -e -cd "$(dirname "$0")" - -while true -do - seed="0" - while [[ $seed == 0* ]]; do - seed=$(tr -dc '0-9' </dev/random | head -c 18) - done - echo "Fuzz using seed: $seed" - echo - ./test.sh --seed=$seed "$@" -done diff --git a/test/geometry.h b/test/geometry.h index 0167d5c..06520c7 100644 --- a/test/geometry.h +++ b/test/geometry.h @@ -26,7 +26,7 @@ namespace Random { vector<ll> partition(ll n, std::size_t k){//min = 0; n += k; vector<ll> res = Random::distinct<ll>(k-1, 1, n); - sort(all(res)); + ranges::sort(res); res.emplace_back(n); ll last = 0; for (std::size_t i = 0; i < k; i++) { @@ -137,4 +137,4 @@ namespace Random { while (ccw(a, b, c) == 0) c = integerPoint(range); return {a, b, c}; } -}
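For reference (not part of the patch): the rewritten union-find test constructs UnionFind uf(n) directly and exercises link(a, b) (returns whether two components were actually merged), find(a), size(a), and add() (appends a fresh singleton and returns its index). A minimal sketch consistent with those calls — assuming path compression and union by size; the actual content/datastructures/unionFind.cpp may differ:

#include <numeric>
#include <utility>
#include <vector>
using namespace std;

struct UnionFind {
    vector<int> par, sz;
    UnionFind(int n) : par(n), sz(n, 1) { iota(par.begin(), par.end(), 0); }
    // Representative of a's component, with path compression.
    int find(int a) { return par[a] == a ? a : par[a] = find(par[a]); }
    // Returns true iff a and b were in different components (a merge happened).
    bool link(int a, int b) {
        a = find(a), b = find(b);
        if (a == b) return false;
        if (sz[a] < sz[b]) swap(a, b);   // union by size
        par[b] = a;
        sz[a] += sz[b];
        return true;
    }
    int size(int a) { return sz[find(a)]; }
    // Adds a new singleton element and returns its index.
    int add() {
        par.push_back((int)par.size());
        sz.push_back(1);
        return (int)par.size() - 1;
    }
};

int main() {
    UnionFind uf(3);
    bool merged = uf.link(0, 1);   // true: components of 0 and 1 are merged
    int extra = uf.add();          // index 3, a fresh singleton
    return (merged && uf.size(0) == 2 && uf.find(extra) == extra) ? 0 : 1;
}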
\ No newline at end of file +} diff --git a/test/geometry/antipodalPoints.cpp b/test/geometry/antipodalPoints.cpp index 66f063b..ec2006e 100644 --- a/test/geometry/antipodalPoints.cpp +++ b/test/geometry/antipodalPoints.cpp @@ -9,7 +9,7 @@ constexpr ll EPS = 0; #include "../geometry.h" vector<pair<int, int>> naive(vector<pt> ps) { - ll n = sz(ps); + ll n = ssize(ps); auto test = [&](int i, int j){ if (dot(ps[j] - ps[i], ps[i - 1] - ps[i]) <= 0) return false; if (dot(ps[j] - ps[i], ps[i + 1] - ps[i]) <= 0) return false; @@ -34,13 +34,13 @@ void stress_test(ll range) { auto got = antipodalPoints(ps); for (auto& [a, b] : got) if (a > b) swap(a, b); - sort(all(got)); + ranges::sort(got); auto expected = naive(ps); for (auto& [a, b] : expected) if (a > b) swap(a, b); for (auto x : expected) { - auto it = lower_bound(all(got), x); + auto it = ranges::lower_bound(got, x); if (it == got.end() || *it != x) cerr << "error" << FAIL; } queries += n; @@ -58,7 +58,7 @@ void performance_test() { auto got = antipodalPoints(ps); t.stop(); - hash_t hash = sz(got); + hash_t hash = ssize(got); if (t.time > 50) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/geometry/circle.cpp b/test/geometry/circle.cpp index 3d3d27d..dc975ff 100644 --- a/test/geometry/circle.cpp +++ b/test/geometry/circle.cpp @@ -46,9 +46,9 @@ void test_circleIntersection(ll range) { auto got = circleIntersection(c1, r1, c2, r2); - if (sz(got) != expectedCount(real(c1), imag(c1), r1, real(c2), imag(c2), r2)) cerr << "error: wrong count" << FAIL; + if (ssize(got) != expectedCount(real(c1), imag(c1), r1, real(c2), imag(c2), r2)) cerr << "error: wrong count" << FAIL; - for (int i = 0; i < sz(got); i++) { + for (int i = 0; i < ssize(got); i++) { for (int j = 0; j < i; j++) { if (abs(got[i] - got[j]) < 1e-6) cerr << "error: identical" << FAIL; } @@ -58,7 +58,7 @@ void test_circleIntersection(ll range) { if (float_error(abs(c1 - p), r1) > 1e-6) cerr << "error: 1" << FAIL; if (float_error(abs(c2 - p), r2) > 1e-6) cerr << "error: 2" << FAIL; } - queries += sz(got); + queries += ssize(got); } cerr << "tested circleIntersection: " << queries << endl; } @@ -91,9 +91,9 @@ void test_circleRayIntersection(ll range) { else expected = 1; } - if (sz(got) != expected) cerr << "error: wrong count" << FAIL; + if (ssize(got) != expected) cerr << "error: wrong count" << FAIL; - for (int i = 0; i < sz(got); i++) { + for (int i = 0; i < ssize(got); i++) { for (int j = 0; j < i; j++) { if (abs(got[i] - got[j]) < 1e-6) cerr << "error: identical" << FAIL; } @@ -103,7 +103,7 @@ void test_circleRayIntersection(ll range) { if (float_error(abs(c - p), r) > 1e-6) cerr << "error: 1" << FAIL; if (distToLine(orig, orig + dir, p) > 1e-6) cerr << "error: 2" << FAIL; } - queries += sz(got); + queries += ssize(got); } cerr << "tested circleIntersection: " << queries << endl; } diff --git a/test/geometry/closestPair.cpp b/test/geometry/closestPair.cpp index 99f9d5e..a8e1382 100644 --- a/test/geometry/closestPair.cpp +++ b/test/geometry/closestPair.cpp @@ -13,7 +13,7 @@ ll isqrt(ll x) {return (ll)sqrtl(x);} //strict convex hull ll naive(const vector<pt>& ps) { ll opt = LL::INF; - for (ll i = 0; i < sz(ps); i++) { + for (ll i = 0; i < ssize(ps); i++) { for (ll j = 0; j < i; j++) { opt = min(opt, norm(ps[i] - ps[j])); } diff --git a/test/geometry/closestPair.double.cpp b/test/geometry/closestPair.double.cpp index 427fcf8..14ccd0d 100644 --- a/test/geometry/closestPair.double.cpp +++ 
b/test/geometry/closestPair.double.cpp @@ -10,7 +10,7 @@ constexpr ll INF = LL::INF; //strict convex hull double naive(const vector<pt>& ps) { double opt = LL::INF; - for (ll i = 0; i < sz(ps); i++) { + for (ll i = 0; i < ssize(ps); i++) { for (ll j = 0; j < i; j++) { opt = min(opt, norm(ps[i] - ps[j])); } diff --git a/test/geometry/convexHull.cpp b/test/geometry/convexHull.cpp index 8a5ad9b..ee858f9 100644 --- a/test/geometry/convexHull.cpp +++ b/test/geometry/convexHull.cpp @@ -9,7 +9,7 @@ constexpr ll EPS = 0; //strict convex hull ll isConvexHull(const vector<pt>& ps, const vector<pt>& hull) { - ll n = sz(hull) - 1; + ll n = ssize(hull) - 1; if (n == 0) { for (pt p : ps) if (p != hull[0]) return 1; return 0; @@ -67,7 +67,7 @@ void performance_test() { t.start(); auto a = convexHull(ps); t.stop(); - hash_t hash = sz(a); + hash_t hash = ssize(a); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/geometry/delaunay.cpp b/test/geometry/delaunay.cpp index 06ad6b5..51df879 100644 --- a/test/geometry/delaunay.cpp +++ b/test/geometry/delaunay.cpp @@ -6,28 +6,27 @@ auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);} #pragma GCC diagnostic ignored "-Wunused-variable" #include <geometry/delaunay.cpp> + vector<pt> convexHull(vector<pt> pts){ - sort(all(pts), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - pts.erase(unique(all(pts)), pts.end()); + ranges::sort(pts, {}, + [](pt x) { return pair{real(x), imag(x)}; }); + pts.erase(begin(ranges::unique(pts)), end(pts)); int k = 0; - vector<pt> h(2 * sz(pts)); - auto half = [&](auto begin, auto end, int t) { - for (auto it = begin; it != end; it++) { - while (k > t && cross(h[k-2], h[k-1], *it) < 0) k--; //allow collinear points! - h[k++] = *it; + vector<pt> h(2 * ssize(pts)); + auto half = [&](auto &&v, int t) { + for (auto x: v) { + while (k > t && cross(h[k-2], h[k-1], x) < 0) k--; // allow collinear points + h[k++] = x; }}; - half(all(pts), 1); // Untere Hülle. - half(next(pts.rbegin()), pts.rend(), k); // Obere Hülle. + half(pts, 1); // Untere Hülle. 
+ half(pts | views::reverse | views::drop(1), k); // Obere Hülle h.resize(k); return h; } lll area(const vector<pt>& poly) { //poly[0] == poly.back() lll res = 0; - for (int i = 0; i + 1 < sz(poly); i++) + for (int i = 0; i + 1 < ssize(poly); i++) res += cross(poly[i], poly[i + 1]); return res; } @@ -89,15 +88,15 @@ void stress_test(ll LIM, ll range) { hull.pop_back(); auto got = delaunay(ps); - if (sz(got) % 3 != 0) cerr << "error: not triangles" << FAIL; - if (sz(got) / 3 + sz(hull) - 3 + 1 != 2 * sz(ps) - 4) cerr << "error: wrong number" << FAIL; + if (ssize(got) % 3 != 0) cerr << "error: not triangles" << FAIL; + if (ssize(got) / 3 + ssize(hull) - 3 + 1 != 2 * ssize(ps) - 4) cerr << "error: wrong number" << FAIL; //all triangles should be oriented ccw lll gotArea = 0; - for (int i = 0; i < sz(got); i += 3) gotArea += cross(got[i], got[i+1], got[i+2]); + for (int i = 0; i < ssize(got); i += 3) gotArea += cross(got[i], got[i+1], got[i+2]); if (gotArea != expectedArea) cerr << "error: wrong area" << FAIL; - for (int i = 0; i < sz(got); i++) { + for (int i = 0; i < ssize(got); i++) { int ii = i + 1; if (i / 3 != ii / 3) ii -= 3; for (int j = 0; j < i; j++) { @@ -111,7 +110,7 @@ void stress_test(ll LIM, ll range) { for (pt p : ps) seen |= p == got[i]; if (!seen) cerr << "error: invalid point" << FAIL; } - for (int i = 0; i < sz(got); i += 3) { + for (int i = 0; i < ssize(got); i += 3) { for (pt p : ps) { if (p == got[i]) continue; if (p == got[i+1]) continue; @@ -131,7 +130,7 @@ void performance_test() { t.start(); auto got = delaunay(ps); t.stop(); - hash_t hash = sz(got); + hash_t hash = ssize(got); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/geometry/formulas.cpp b/test/geometry/formulas.cpp index d63d431..f472e1f 100644 --- a/test/geometry/formulas.cpp +++ b/test/geometry/formulas.cpp @@ -107,7 +107,7 @@ void test_uniqueAngle(ll range) { if (it->second != expected) cerr << "error: inconsistent" << FAIL; queries++; } - cerr << "tested uniqueAngle: " << queries << " (" << sz(seen) << ")" << endl; + cerr << "tested uniqueAngle: " << queries << " (" << ssize(seen) << ")" << endl; } int main() { diff --git a/test/geometry/hpi.cpp b/test/geometry/hpi.cpp index a2326bc..e22e8c6 100644 --- a/test/geometry/hpi.cpp +++ b/test/geometry/hpi.cpp @@ -1,4 +1,6 @@ #include "../util.h" +#define sz(X) (ll)::size(X) +#define all(X) ::begin(X), ::end(X) constexpr ll EPS = 0; #define double ll #define polar polar<ll> @@ -14,10 +16,10 @@ ll sgn(ll x) { //https://cp-algorithms.com/geometry/halfplane-intersection.html namespace cpalgo { // Redefine epsilon and infinity as necessary. Be mindful of precision errors. - const long double eps = 1e-9, inf = 1e9; + const long double eps = 1e-9, inf = 1e9; // Basic point/vector struct. - struct Point { + struct Point { long double x, y; explicit Point(long double x_ = 0, long double y_ = 0) : x(x_), y(y_) {} @@ -26,23 +28,23 @@ namespace cpalgo { // Addition, substraction, multiply by constant, dot product, cross product. 
friend Point operator + (const Point& p, const Point& q) { - return Point(p.x + q.x, p.y + q.y); + return Point(p.x + q.x, p.y + q.y); } - friend Point operator - (const Point& p, const Point& q) { - return Point(p.x - q.x, p.y - q.y); + friend Point operator - (const Point& p, const Point& q) { + return Point(p.x - q.x, p.y - q.y); } - friend Point operator * (const Point& p, const long double& k) { - return Point(p.x * k, p.y * k); - } + friend Point operator * (const Point& p, const long double& k) { + return Point(p.x * k, p.y * k); + } friend long double dot(const Point& p, const Point& q) { return p.x * q.x + p.y * q.y; } - friend long double cross(const Point& p, const Point& q) { - return p.x * q.y - p.y * q.x; + friend long double cross(const Point& p, const Point& q) { + return p.x * q.y - p.y * q.x; } friend std::ostream& operator<<(std::ostream& os, const Point& p) { @@ -53,10 +55,10 @@ namespace cpalgo { }; // Basic half-plane struct. - struct Halfplane { + struct Halfplane { // 'p' is a passing point of the line and 'pq' is the direction vector of the line. - Point p, pq; + Point p, pq; long double angle; Halfplane() {} @@ -66,16 +68,16 @@ namespace cpalgo { Halfplane(array<pt, 2> ps) : Halfplane(ps[0], ps[1]) {} Halfplane(hp h) : Halfplane(h.from, h.to) {} - // Check if point 'r' is outside this half-plane. + // Check if point 'r' is outside this half-plane. // Every half-plane allows the region to the LEFT of its line. bool out(const Point& r) { - return cross(pq, r - p) < -eps; + return cross(pq, r - p) < -eps; } - // Comparator for sorting. - bool operator < (const Halfplane& e) const { + // Comparator for sorting. + bool operator < (const Halfplane& e) const { return angle < e.angle; - } + } // Intersection point of the lines of two half-planes. It is assumed they're never parallel. friend Point inter(const Halfplane& s, const Halfplane& t) { @@ -89,13 +91,13 @@ namespace cpalgo { }; // Actual algorithm - vector<Point> hp_intersect(vector<Halfplane>& H) { + vector<Point> hp_intersect(vector<Halfplane>& H) { /*Point box[4] = { // Bounding box in CCW order - Point(inf, inf), - Point(-inf, inf), - Point(-inf, -inf), - Point(inf, -inf) + Point(inf, inf), + Point(-inf, inf), + Point(-inf, -inf), + Point(inf, -inf) }; for(int i = 0; i<4; i++) { // Add bounding box half-planes. 
@@ -181,7 +183,7 @@ void test_check(ll range) { auto b = Random::line(range); auto c = b; while (cross(b[0] - b[1], c[0] - c[1]) == 0) c = Random::line(range); - + bool got = hp(a[0], a[1]).check(hp(b[0], b[1]), hp(c[0], c[1])); bool expected = naiveCheck(a, b, c); diff --git a/test/geometry/polygon.cpp b/test/geometry/polygon.cpp index 2653dbd..1d9f828 100644 --- a/test/geometry/polygon.cpp +++ b/test/geometry/polygon.cpp @@ -135,7 +135,7 @@ void test_insideConvex(ll LIM, ll range) { // convex hull without duplicates, h[0] != h.back() // apply comments if border counts as inside bool insideOrOnConvex(pt p, const vector<pt>& hull) { - int l = 0, r = sz(hull) - 1; + int l = 0, r = ssize(hull) - 1; if (cross(hull[0], hull[r], p) > 0) return false; while (l + 1 < r) { int m = (l + r) / 2; @@ -155,7 +155,7 @@ void test_minkowski(ll LIM, ll range) { auto got = minkowski(A, B); bool convex = true; - for (int i = 0; i < sz(got); i++) convex &= cross(got[i], got[(i+1) % sz(got)], got[(i+2) % sz(got)]) >= 0; + for (int i = 0; i < ssize(got); i++) convex &= cross(got[i], got[(i+1) % ssize(got)], got[(i+2) % ssize(got)]) >= 0; if (!convex) cerr << "error: not convex" << FAIL; for (pt a : A) { @@ -172,19 +172,19 @@ double naive_dist(const vector<pt>& ps, const vector<pt>& qs) { //check if intersect double res = LD::INF; bool intersect = true; - for (int i = 0; i < sz(qs); i++) { + for (int i = 0; i < ssize(qs); i++) { bool sep = true; for (pt p : ps) { - res = min(res, distToSegment(qs[i], qs[(i+1) % sz(qs)], p)); - sep &= cross(qs[i], qs[(i+1) % sz(qs)], p) <= 0; + res = min(res, distToSegment(qs[i], qs[(i+1) % ssize(qs)], p)); + sep &= cross(qs[i], qs[(i+1) % ssize(qs)], p) <= 0; } if (sep) intersect = false; } - for (int i = 0; i < sz(ps); i++) { + for (int i = 0; i < ssize(ps); i++) { bool sep = true; for (pt q : qs) { - res = min(res, distToSegment(ps[i], ps[(i+1) % sz(ps)], q)); - sep &= cross(ps[i], ps[(i+1) % sz(ps)], q) <= 0; + res = min(res, distToSegment(ps[i], ps[(i+1) % ssize(ps)], q)); + sep &= cross(ps[i], ps[(i+1) % ssize(ps)], q) <= 0; } if (sep) intersect = false; } @@ -263,10 +263,10 @@ void test_intersect(ll LIM, ll range) { } } } - if (sz(expected) > 1 && expected[0] == expected[1]) expected.pop_back(); + if (ssize(expected) > 1 && expected[0] == expected[1]) expected.pop_back(); - sort(all(got)); - sort(all(expected)); + ranges::sort(got); + ranges::sort(expected); if (got != expected) cerr << "error" << FAIL; diff --git a/test/geometry/segmentIntersection.cpp b/test/geometry/segmentIntersection.cpp index 0f67eb2..f48fb8a 100644 --- a/test/geometry/segmentIntersection.cpp +++ b/test/geometry/segmentIntersection.cpp @@ -40,7 +40,7 @@ vector<seg> randomSegs(int n, ll range) { } bool naive(vector<seg>& segs) { - for (ll i = 0; i < sz(segs); i++) { + for (ll i = 0; i < ssize(segs); i++) { for (ll j = 0; j < i; j++) { if (segmentIntersection(segs[i].a, segs[i].b, segs[j].a, segs[j].b)) return true; } diff --git a/test/geometry/sortAround.cpp b/test/geometry/sortAround.cpp index abd803e..895a6d6 100644 --- a/test/geometry/sortAround.cpp +++ b/test/geometry/sortAround.cpp @@ -24,7 +24,7 @@ void test_tiny() { }; auto got = expected; for (int i = 0; i < 100'000; i++) { - shuffle(all(got), Random::rng); + ranges::shuffle(got, Random::rng); sortAround(0, got); if (got != expected) cerr << "error" << FAIL; } @@ -51,8 +51,8 @@ void stress_test(ll range) { auto isLeft = [&](pt p){return real(p - c) < 0 || (real(p - c) == 0 && imag(p - c) < 0);}; auto isCCW = [&](pt a, pt b){return cross(c, a, 
b) > 0;}; - if (!is_partitioned(all(ps), isLeft)) cerr << "error 1" << FAIL; - auto mid = partition_point(all(ps), isLeft); + if (!ranges::is_partitioned(ps, isLeft)) cerr << "error 1" << FAIL; + auto mid = ranges::partition_point(ps, isLeft); if (!is_sorted(ps.begin(), mid, isCCW)) cerr << "error 2" << FAIL; if (!is_sorted(mid, ps.end(), isCCW)) cerr << "error 3" << FAIL; queries += n; diff --git a/test/graph/2sat.cpp b/test/graph/2sat.cpp index 4635086..fd6326c 100644 --- a/test/graph/2sat.cpp +++ b/test/graph/2sat.cpp @@ -25,7 +25,7 @@ struct RandomClause { return false; } - void add(sat2& sat) const { + void add(SAT2& sat) const { int va = a; int vb = b; if (type == 0) sat.addImpl(va, vb); @@ -80,9 +80,8 @@ void stress_test() { vector<RandomClause> clauses; for (int i = 0; i < m; i++) clauses.emplace_back(n); - sat2 sat(n); + SAT2 sat(n); for (auto& c : clauses) c.add(sat); - adj = sat.adj; bool got = sat.solve(); bool expected = naive(n, clauses); @@ -113,11 +112,8 @@ void performance_test() { vector<RandomClause> clauses; for (int i = 0; i < M; i++) clauses.emplace_back(N); t.start(); - sat2 sat(N); + SAT2 sat(N); for (auto& c : clauses) c.add(sat); - t.stop(); - adj = sat.adj; - t.start(); hash_t hash = sat.solve(); t.stop(); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; diff --git a/test/graph/2sat.cpp.awk b/test/graph/2sat.cpp.awk deleted file mode 100644 index d0215d8..0000000 --- a/test/graph/2sat.cpp.awk +++ /dev/null @@ -1,6 +0,0 @@ -/scc variablen/ { - print; - print "\tvector<vector<int>> adj;"; - next -} -{ print } diff --git a/test/graph/TSP.cpp b/test/graph/TSP.cpp index 930ec88..3b1ce94 100644 --- a/test/graph/TSP.cpp +++ b/test/graph/TSP.cpp @@ -7,9 +7,9 @@ constexpr ll INF = LL::INF; #include <graph/TSP.cpp> vector<int> naive() { - int n = sz(dist); + int n = ssize(dist); vector<int> todo(n - 1); - iota(all(todo), 1); + iota(begin(todo), end(todo), 1); vector<int> res; ll best = LL::INF; do { @@ -26,7 +26,7 @@ vector<int> naive() { res.insert(res.begin(), 0); res.push_back(0); } - } while (next_permutation(all(todo))); + } while (ranges::next_permutation(todo).found); return res; } @@ -39,7 +39,7 @@ void stress_test() { auto expected = naive(); auto got = TSP(); - + if (got != expected) cerr << "error" << FAIL; queries += n; } diff --git a/test/graph/articulationPoints.bcc.cpp b/test/graph/articulationPoints.bcc.cpp index e9fc32f..927ceb4 100644 --- a/test/graph/articulationPoints.bcc.cpp +++ b/test/graph/articulationPoints.bcc.cpp @@ -8,11 +8,11 @@ struct edge { #include <datastructures/unionFind.cpp> vector<vector<int>> naiveBCC(int m) { - init(m); + UnionFind uf(m); - vector<int> seen(sz(adj), -1); + vector<int> seen(ssize(adj), -1); int run = 0; - for (int i = 0; i < sz(adj); i++) { + for (int i = 0; i < ssize(adj); i++) { for (auto e : adj[i]) { run++; seen[i] = run; @@ -28,17 +28,17 @@ vector<vector<int>> naiveBCC(int m) { } } for (auto ee : adj[i]) { - if (seen[ee.to] == run) unionSets(ee.id, e.id); + if (seen[ee.to] == run) uf.link(ee.id, e.id); } } } vector<vector<int>> res(m); for (int i = 0; i < m; i++) { - res[findSet(i)].push_back(i); + res[uf.find(i)].push_back(i); } - for (auto& v : res) sort(all(v)); - res.erase(remove_if(all(res), [](const vector<int>& v){return sz(v) <= 1;}), res.end()); - sort(all(res)); + for (auto& v : res) ranges::sort(v); + res.erase(begin(ranges::remove_if(res, [](const vector<int>& v){return ssize(v) <= 1;})), end(res)); + ranges::sort(res); return res; } @@ -60,12 +60,12 @@ void stress_test_bcc(int LIM) { auto 
expected = naiveBCC(nextId); find(); - vector<vector<int>> got(sz(bcc)); - for (int i = 0; i < sz(bcc); i++) { + vector<vector<int>> got(ssize(bcc)); + for (int i = 0; i < ssize(bcc); i++) { for (auto e : bcc[i]) got[i].push_back(e.id); - sort(all(got[i])); + ranges::sort(got[i]); } - sort(all(got)); + ranges::sort(got); if (got != expected) cerr << "error" << FAIL; queries += n; diff --git a/test/graph/articulationPoints.bridges.cpp b/test/graph/articulationPoints.bridges.cpp index a1b89d2..15408ea 100644 --- a/test/graph/articulationPoints.bridges.cpp +++ b/test/graph/articulationPoints.bridges.cpp @@ -7,10 +7,10 @@ struct edge { #undef Edge vector<bool> naiveBridges(const vector<pair<int, int>>& edges) { - vector<bool> res(sz(edges)); + vector<bool> res(ssize(edges)); - vector<int> seen(sz(adj), -1); - for (int i = 0; i < sz(edges); i++) { + vector<int> seen(ssize(adj), -1); + for (int i = 0; i < ssize(edges); i++) { auto [a, b] = edges[i]; vector<int> todo = {a}; seen[a] = i; @@ -40,14 +40,14 @@ void stress_test_bridges() { adj.assign(n, {}); vector<pair<int, int>> edges; g.forEdges([&](int a, int b){ - adj[a].push_back({a, b, sz(edges)}); - adj[b].push_back({b, a, sz(edges)}); + adj[a].push_back({a, b, ssize(edges)}); + adj[b].push_back({b, a, ssize(edges)}); edges.emplace_back(a, b); }); auto expected = naiveBridges(edges); find(); - vector<bool> got(sz(edges)); + vector<bool> got(ssize(edges)); for (auto e : bridges) { if (got[e.id]) cerr << "error: duclicate" << FAIL; got[e.id] = true; diff --git a/test/graph/articulationPoints.cpp b/test/graph/articulationPoints.cpp index 8ee6bc4..6960f73 100644 --- a/test/graph/articulationPoints.cpp +++ b/test/graph/articulationPoints.cpp @@ -7,10 +7,10 @@ struct edge { #undef Edge vector<bool> naiveArt() { - vector<bool> res(sz(adj)); + vector<bool> res(ssize(adj)); - vector<int> seen(sz(adj), -1); - for (int i = 0; i < sz(adj); i++) { + vector<int> seen(ssize(adj), -1); + for (int i = 0; i < ssize(adj); i++) { if (adj[i].empty()) continue; seen[i] = i; vector<ll> todo = {adj[i][0].to}; @@ -72,9 +72,9 @@ void performance_test() { }); t.start(); - find(); + find(); t.stop(); - hash_t hash = sz(bridges) + sz(bcc); + hash_t hash = ssize(bridges) + ssize(bcc); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/graph/binary_lifting.cpp b/test/graph/binary_lifting.cpp new file mode 100644 index 0000000..20318da --- /dev/null +++ b/test/graph/binary_lifting.cpp @@ -0,0 +1,60 @@ +#include "../util.h" +#include <graph/binary_lifting.cpp> +namespace expected { +#include <graph/hld.cpp> +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer<int>(2, 30); + Graph<NoData> g(n); + g.tree(); + + vector<vector<int>> adj(n); + g.forEdges([&](int a, int b){ + adj[a].push_back(b); + adj[b].push_back(a); + }); + + Lift lift(adj, 0); + + expected::adj = adj; + expected::init(); + + for (int i = 0; i < n; i++) { + for (int j = 0; j <= i; j++) { + auto got = lift.lca(i, j); + auto expected = expected::get_lca(i, j); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + Graph<NoData> g(N); + g.tree(); + vector<vector<int>> adj(N); + g.forEdges([&](int a, int b){ + adj[a].push_back(b); + adj[b].push_back(a); + 
}); + + hash_t hash = 0; + t.start(); + Lift lift(adj, 0); + for (int i = 1; i < N; i++) hash += lift.lca(i-1, i); + t.stop(); + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/blossom.cpp b/test/graph/blossom.cpp index f44f815..56d3132 100644 --- a/test/graph/blossom.cpp +++ b/test/graph/blossom.cpp @@ -1,6 +1,6 @@ #include "../util.h" namespace tutte { -void gauss(int n, int m); +vector<int> gauss(vector<vector<ll>> &mat); #include <graph/matching.cpp> #include <math/shortModInv.cpp> #include <math/lgsFp.cpp> @@ -15,20 +15,20 @@ void stress_test() { GM blossom(n); srand(Random::rng()); - tutte::adj.assign(n, {}); + vector<vector<int>> adj(n); Graph<NoData> g(n); g.erdosRenyi(m); g.forEdges([&](int a, int b){ - tutte::adj[a].push_back(b); - tutte::adj[b].push_back(a); + adj[a].push_back(b); + adj[b].push_back(a); blossom.adj[a].push_back(b); blossom.adj[b].push_back(a); }); ll got = blossom.match(); - ll expected = tutte::max_matching(); + ll expected = tutte::max_matching(adj); vector<bool> seen(n); ll got2 = 0; diff --git a/test/graph/bronKerbosch.cpp b/test/graph/bronKerbosch.cpp index 1a90c06..8c0a200 100644 --- a/test/graph/bronKerbosch.cpp +++ b/test/graph/bronKerbosch.cpp @@ -9,7 +9,7 @@ void naive(bits mask = {}, int l = 0) { if (mask[i]) continue; if ((adj[i] & mask) == mask) maximal = false; } - for (; l < sz(adj); l++) { + for (; l < ssize(adj); l++) { if ((adj[l] & mask) == mask) { maximal = false; mask[l] = 1; @@ -37,10 +37,10 @@ void stress_test() { naiveCliques.clear(); naive(); - sort(all(cliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();}); - sort(all(naiveCliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();}); + ranges::sort(cliques, {}, [](bits x) { return x.to_ullong(); }); + ranges::sort(naiveCliques, {}, [](bits x) { return x.to_ullong(); }); - if (cliques != naiveCliques) cerr << "got: " << sz(cliques) << ", expected: " << sz(naiveCliques) << FAIL; + if (cliques != naiveCliques) cerr << "got: " << ssize(cliques) << ", expected: " << ssize(naiveCliques) << FAIL; queries += n; } cerr << "tested random queries: " << queries << endl; @@ -62,7 +62,7 @@ void performance_test() { bronKerbosch(); t.stop(); - hash_t hash = sz(cliques); + hash_t hash = ssize(cliques); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/graph/centroid.cpp b/test/graph/centroid.cpp index c3f1d3f..d231c3e 100644 --- a/test/graph/centroid.cpp +++ b/test/graph/centroid.cpp @@ -13,9 +13,9 @@ int subtreeSize(int c, int p) { vector<int> naive() { vector<int> res; - for (int i = 0; i < sz(adj); i++) { + for (int i = 0; i < ssize(adj); i++) { bool isCentroid = true; - for (int j : adj[i]) isCentroid &= 2*subtreeSize(j, i) <= sz(adj); + for (int j : adj[i]) isCentroid &= 2*subtreeSize(j, i) <= ssize(adj); if (isCentroid) res.push_back(i); } return res; @@ -33,16 +33,16 @@ void stress_test() { adj[a].push_back(b); adj[b].push_back(a); }); - + auto expected = naive(); - sort(all(expected)); + ranges::sort(expected); for (int i = 0; i < n; i++) { auto [a, b] = find_centroid(i); vector<int> got; if (a >= 0) got.push_back(a); if (b >= 0) got.push_back(b); - sort(all(got)); + ranges::sort(got); if (got != expected) cerr << "error" << FAIL; } @@ -63,7 +63,7 @@ void performance_test() { 
adj[b].push_back(a); }); - t.start(); + t.start(); auto [gotA, gotB] = find_centroid(); t.stop(); hash_t hash = gotA + gotB; diff --git a/test/graph/connect.cpp b/test/graph/connect.cpp index 8114339..ef087e3 100644 --- a/test/graph/connect.cpp +++ b/test/graph/connect.cpp @@ -52,8 +52,8 @@ void stress_test(int lim) { int m = Random::integer<int>(30, 300); vector<int> insertOrder(m); - iota(all(insertOrder), 0); - shuffle(all(insertOrder), Random::rng); + iota(begin(insertOrder), end(insertOrder), 0); + ranges::shuffle(insertOrder, Random::rng); vector<pair<int, int>> edges(m, {-1, -1}); connect con(n, m); @@ -104,15 +104,15 @@ void performance_test() { t.stop(); vector<int> insertOrder(M); - iota(all(insertOrder), 0); - shuffle(all(insertOrder), Random::rng); + iota(begin(insertOrder), end(insertOrder), 0); + ranges::shuffle(insertOrder, Random::rng); vector<bool> inserted(M); for (int i = 0, j = 0; i < N; i++) { int a = Random::integer<int>(0, N); int b = a; while (b == a) b = Random::integer<int>(0, N); - + t.start(); con.addEdge(a, b, insertOrder[i]); t.stop(); diff --git a/test/graph/cycleCounting.cpp b/test/graph/cycleCounting.cpp index 6459162..bfe313e 100644 --- a/test/graph/cycleCounting.cpp +++ b/test/graph/cycleCounting.cpp @@ -4,20 +4,16 @@ int naive(const vector<pair<int, int>>& edges, int n) { int res = 0; - for (int i = 1; i < (1ll << sz(edges)); i++) { + for (int i = 1; i < (1ll << ssize(edges)); i++) { vector<int> deg(n); - init(n); + UnionFind uf(n); int cycles = 0; - for (int j = 0; j < sz(edges); j++) { + for (int j = 0; j < ssize(edges); j++) { if (((i >> j) & 1) != 0) { auto [a, b] = edges[j]; deg[a]++; deg[b]++; - if (findSet(a) != findSet(b)) { - unionSets(a, b); - } else { - cycles++; - } + if (!uf.link(a, b)) cycles++; } } bool ok = cycles == 1; @@ -66,7 +62,7 @@ void performance_test() { t.start(); hash_t hash = cyc.count(); - cerr << sz(cyc.base) << endl; + cerr << ssize(cyc.base) << endl; t.stop(); if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; diff --git a/test/graph/dijkstra.cpp b/test/graph/dijkstra.cpp index dd5b826..d79e700 100644 --- a/test/graph/dijkstra.cpp +++ b/test/graph/dijkstra.cpp @@ -13,21 +13,21 @@ void stress_test(int LIM) { int n = Random::integer<int>(2, 30); int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1))); - vector<vector<path>> adj(n); + vector<vector<pair<int, ll>>> adj(n); vector<edge> edges; Graph<NoData, true> g(n); g.erdosRenyi(m); g.forEdges([&](int a, int b){ ll w = Random::integer<ll>(1, 1'000'000'000'000ll); - adj[a].push_back({w, b}); + adj[a].emplace_back(b, w); edges.push_back({a, b, w}); }); for (int i = 0; i < n; i++) { auto got = dijkstra(adj, i); auto expected = bellmannFord(n, edges, i); - + if (got != expected) cerr << "error" << FAIL; queries += n; } @@ -41,12 +41,12 @@ void performance_test() { timer t; Graph<NoData> g(N); g.erdosRenyi(M); - vector<vector<path>> adj(N); + vector<vector<pair<int, ll>>> adj(N); g.forEdges([&](int a, int b){ ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll); ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll); - adj[a].push_back({w1, b}); - adj[b].push_back({w2, a}); + adj[a].emplace_back(b, w1); + adj[b].emplace_back(a, w2); }); t.start(); diff --git a/test/graph/dinic.cpp b/test/graph/dinic.cpp deleted file mode 100644 index bd270be..0000000 --- a/test/graph/dinic.cpp +++ /dev/null @@ -1,62 +0,0 @@ -#include "../util.h" -constexpr ll INF = LL::INF; -namespace dinic { -#include <graph/dinic.cpp> -} - -namespace pushRelabel { -#include 
<graph/pushRelabel.cpp> -} - -void stress_test() { - ll queries = 0; - for (int tries = 0; tries < 20'000; tries++) { - int n = Random::integer<int>(2, 30); - int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1))); - - dinic::adj.assign(n, {}); - pushRelabel::adj.assign(n, {}); - - Graph<NoData, true> g(n); - g.erdosRenyi(m); - g.forEdges([](int a, int b){ - ll w = Random::integer<ll>(1, 1'000'000'000'000ll); - dinic::addEdge(a, b, w); - pushRelabel::addEdge(a, b, w); - }); - - ll got = dinic::maxFlow(0, n - 1); - ll expected = pushRelabel::maxFlow(0, n - 1); - - if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; - queries += n; - } - cerr << "tested random queries: " << queries << endl; -} - -constexpr int N = 50000; -constexpr int M = 200000; -void performance_test() { - using namespace dinic; - timer t; - Graph<NoData> g(N); - g.erdosRenyi(M); - adj.assign(N, {}); - g.forEdges([](int a, int b){ - ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll); - ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll); - addEdge(a, b, w1); - addEdge(b, a, w2); - }); - - t.start(); - hash_t hash = maxFlow(0, N - 1); - t.stop(); - if (t.time > 2000) cerr << "too slow: " << t.time << FAIL; - cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; -} - -int main() { - stress_test(); - if (!sanitize) performance_test(); -} diff --git a/test/graph/dinicScaling.cpp b/test/graph/dinitzScaling.cpp index 065dd9e..0ab9718 100644 --- a/test/graph/dinicScaling.cpp +++ b/test/graph/dinitzScaling.cpp @@ -1,6 +1,6 @@ #include "../util.h" -namespace dinic { -#include <graph/dinicScaling.cpp> +namespace dinitz { +#include <graph/dinitzScaling.cpp> } namespace pushRelabel { @@ -13,20 +13,20 @@ void stress_test() { int n = Random::integer<int>(2, 30); int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1))); - dinic::adj.assign(n, {}); + dinitz::adj.assign(n, {}); pushRelabel::adj.assign(n, {}); Graph<NoData, true> g(n); g.erdosRenyi(m); g.forEdges([](int a, int b){ ll w = Random::integer<ll>(1, 1'000'000'000'000ll); - dinic::addEdge(a, b, w); + dinitz::addEdge(a, b, w); pushRelabel::addEdge(a, b, w); }); - ll got = dinic::maxFlow(0, n - 1); + ll got = dinitz::maxFlow(0, n - 1); ll expected = pushRelabel::maxFlow(0, n - 1); - + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; queries += n; } @@ -36,7 +36,7 @@ void stress_test() { constexpr int N = 50000; constexpr int M = 200000; void performance_test() { - using namespace dinic; + using namespace dinitz; timer t; Graph<NoData> g(N); g.erdosRenyi(M); diff --git a/test/graph/euler.cpp b/test/graph/euler.cpp index b26add1..5314123 100644 --- a/test/graph/euler.cpp +++ b/test/graph/euler.cpp @@ -20,7 +20,7 @@ Euler eulerGraph(int n, int m) { } int last = -1; for (int i = 0; i < n; i++) { - if (sz(res.adj[i]) % 2 != 0) { + if (ssize(res.adj[i]) % 2 != 0) { if (last >= 0) { res.addEdge(last, i); last = -1; @@ -41,25 +41,25 @@ void stress_test() { int m = Random::integer<int>(n-1, 200); auto g = eulerGraph(n, m); - + vector<vector<int>> expected(n); for (int i = 0; i < n; i++) { for (auto [j, rev] : g.adj[i]) { expected[i].push_back(j); } - sort(all(expected[i])); + ranges::sort(expected[i]); } g.euler(0); vector<vector<int>> got(n); if (g.cycle.front() != g.cycle.back()) cerr << "error: not cyclic" << FAIL; - for (int i = 1; i < sz(g.cycle); i++) { + for (int i = 1; i < ssize(g.cycle); i++) { int a = g.cycle[i-1]; int b = g.cycle[i]; 
got[a].push_back(b); got[b].push_back(a); } - for (auto& v : got) sort(all(v)); + for (auto& v : got) ranges::sort(v); if (got != expected) cerr << "error" << FAIL; queries += n; diff --git a/test/graph/floydWarshall.cpp b/test/graph/floydWarshall.cpp index 5926449..819af39 100644 --- a/test/graph/floydWarshall.cpp +++ b/test/graph/floydWarshall.cpp @@ -40,7 +40,7 @@ void stress_test(int LIM) { if (path.empty()) continue; if (path.front() != i) cerr << "error: start" << FAIL; if (path.back() != k) cerr << "error: end" << FAIL; - for (int l = 1; l < sz(path); l++) { + for (int l = 1; l < ssize(path); l++) { if (floydWarshall::dist[i][path[l-1]] + orig[path[l-1]][path[l]] + floydWarshall::dist[path[l]][k] != @@ -52,7 +52,7 @@ void stress_test(int LIM) { for (int i = 0; i < n; i++) { auto got = floydWarshall::dist[i]; auto expected = bellmannFord(n, edges, i); - + if (got != expected) cerr << "error" << FAIL; queries += n; } diff --git a/test/graph/havelHakimi.cpp b/test/graph/havelHakimi.cpp index f0b6fd9..0752b85 100644 --- a/test/graph/havelHakimi.cpp +++ b/test/graph/havelHakimi.cpp @@ -13,11 +13,11 @@ void stress_test() { for (int i = 0; i < n; i++) expected[i] = g.deg(i); auto res = havelHakimi(expected); - if (sz(res) != n) cerr << "error: wrong number of nodes" << FAIL; + if (ssize(res) != n) cerr << "error: wrong number of nodes" << FAIL; vector<vector<int>> rev(n); vector<int> got(n); for (int i = 0; i < n; i++) { - got[i] = sz(res[i]); + got[i] = ssize(res[i]); for (int j : res[i]) { if (j < 0 || j >= n) cerr << "error: invalid edge" << FAIL; rev[j].push_back(i); @@ -25,11 +25,11 @@ void stress_test() { } for (int i = 0; i < n; i++) { - sort(all(res[i])); - sort(all(rev[i])); + ranges::sort(res[i]); + ranges::sort(rev[i]); if (res[i] != rev[i]) cerr << "error: graph is directed" << FAIL; for (int j : res[i]) if (j == i) cerr << "error: graph has loop" << FAIL; - for (int j = 1; j < sz(res[i]); j++) { + for (int j = 1; j < ssize(res[i]); j++) { if (res[i][j] == res[i][j-1]) cerr << "error: multiedge" << FAIL; } } @@ -54,7 +54,7 @@ void performance_test() { auto res = havelHakimi(expected); t.stop(); hash_t hash = 0; - for (auto& v : res) hash += sz(v); + for (auto& v : res) hash += ssize(v); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/graph/hopcroftKarp.cpp b/test/graph/hopcroftKarp.cpp index a6306b6..c446c99 100644 --- a/test/graph/hopcroftKarp.cpp +++ b/test/graph/hopcroftKarp.cpp @@ -1,6 +1,6 @@ #include "../util.h" namespace kuhn { -#include <graph/maxCarBiMatch.cpp> +#include <graph/kuhn.cpp> } namespace hk { #include <graph/hopcroftKarp.cpp> diff --git a/test/graph/kruskal.cpp b/test/graph/kruskal.cpp index d80376f..157a2f4 100644 --- a/test/graph/kruskal.cpp +++ b/test/graph/kruskal.cpp @@ -1,22 +1,19 @@ #include "../util.h" #include <datastructures/unionFind.cpp> -struct edge { +#define Edge Edge_ // we have a struct named Edge in util.h + +struct Edge { int from, to; ll cost; - bool operator<(const edge& o) const { + bool operator<(const Edge& o) const { return cost > o.cost; } }; -ll kruskal(vector<edge>& edges, int n) { - init(n); - #define Edge edge - #include <graph/kruskal.cpp> - #undef Edge - return cost; -} -ll prim(vector<edge>& edges, int n) { +#include <graph/kruskal.cpp> + +ll prim(vector<Edge>& edges, int n) { vector<vector<pair<ll, int>>> adj(n); for (auto [a, b, d] : edges) { adj[a].emplace_back(d, b); @@ -51,13 +48,14 @@ void stress_test() { 
Graph<NoData> g(n); g.erdosRenyi(m); - vector<edge> edges; + vector<Edge> edges; g.forEdges([&](int a, int b){ ll w = Random::integer<ll>(-1'000'000'000ll, 1'000'000'000ll); edges.push_back({a, b, w}); }); - ll got = kruskal(edges, n); + vector<Edge> mst; + ll got = kruskal(n, edges, mst); ll expected = prim(edges, n); if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; @@ -72,14 +70,15 @@ void performance_test() { timer t; Graph<NoData> g(N); g.erdosRenyi(M); - vector<edge> edges; + vector<Edge> edges; g.forEdges([&](int a, int b){ ll w = Random::integer<ll>(-1'000'000'000ll, 1'000'000'000ll); edges.push_back({a, b, w}); }); t.start(); - hash_t hash = kruskal(edges, N); + vector<Edge> mst; + hash_t hash = kruskal(N, edges, mst); t.stop(); if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; diff --git a/test/graph/maxCarBiMatch.cpp b/test/graph/kuhn.cpp index 6672d30..0a6a9a4 100644 --- a/test/graph/maxCarBiMatch.cpp +++ b/test/graph/kuhn.cpp @@ -1,6 +1,6 @@ #include "../util.h" namespace kuhn { -#include <graph/maxCarBiMatch.cpp> +#include <graph/kuhn.cpp> } namespace hk { #include <graph/hopcroftKarp.cpp> diff --git a/test/graph/matching.cpp b/test/graph/matching.cpp index ccd98e6..d737954 100644 --- a/test/graph/matching.cpp +++ b/test/graph/matching.cpp @@ -1,6 +1,6 @@ #include "../util.h" namespace tutte { -void gauss(int n, int m); +vector<int> gauss(vector<vector<ll>> &mat); #include <graph/matching.cpp> #include <math/shortModInv.cpp> #include <math/lgsFp.cpp> @@ -15,19 +15,19 @@ void stress_test() { GM blossom(n); srand(Random::rng()); - tutte::adj.assign(n, {}); + vector<vector<int>> adj(n); Graph<NoData> g(n); g.erdosRenyi(m); g.forEdges([&](int a, int b){ - tutte::adj[a].push_back(b); - tutte::adj[b].push_back(a); + adj[a].push_back(b); + adj[b].push_back(a); blossom.adj[a].push_back(b); blossom.adj[b].push_back(a); }); - ll got = tutte::max_matching(); + ll got = tutte::max_matching(adj); ll expected = blossom.match(); if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; @@ -43,14 +43,14 @@ void performance_test() { Graph<NoData> g(N); g.erdosRenyi(M); srand(Random::rng()); - tutte::adj.assign(N, {}); + vector<vector<int>> adj(N); g.forEdges([&](int a, int b){ - tutte::adj[a].push_back(b); - tutte::adj[b].push_back(a); + adj[a].push_back(b); + adj[b].push_back(a); }); t.start(); - hash_t hash = tutte::max_matching(); + hash_t hash = tutte::max_matching(adj); t.stop(); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; diff --git a/test/graph/pushRelabel.cpp b/test/graph/pushRelabel.cpp index 00a73d1..ca50860 100644 --- a/test/graph/pushRelabel.cpp +++ b/test/graph/pushRelabel.cpp @@ -1,6 +1,6 @@ #include "../util.h" -namespace dinic { -#include <graph/dinicScaling.cpp> +namespace dinitz { +#include <graph/dinitzScaling.cpp> } namespace pushRelabel { @@ -13,20 +13,20 @@ void stress_test() { int n = Random::integer<int>(2, 30); int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1))); - dinic::adj.assign(n, {}); + dinitz::adj.assign(n, {}); pushRelabel::adj.assign(n, {}); Graph<NoData, true> g(n); g.erdosRenyi(m); g.forEdges([](int a, int b){ ll w = Random::integer<ll>(1, 1'000'000'000'000ll); - dinic::addEdge(a, b, w); + dinitz::addEdge(a, b, w); pushRelabel::addEdge(a, b, w); }); ll got = pushRelabel::maxFlow(0, n - 1); - 
ll expected = dinic::maxFlow(0, n - 1); - + ll expected = dinitz::maxFlow(0, n - 1); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; queries += n; } diff --git a/test/graph/reroot.cpp b/test/graph/reroot.cpp index 6fc2f4d..c7c4608 100644 --- a/test/graph/reroot.cpp +++ b/test/graph/reroot.cpp @@ -47,7 +47,7 @@ void performance_test() { t.start(); Reroot re; auto ans = re.solve(); - hash = accumulate(all(ans), 0LL); + hash = accumulate(begin(ans), end(ans), 0LL); t.stop(); if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; diff --git a/test/graph/scc.cpp b/test/graph/scc.cpp index 7c1261f..ebd3af0 100644 --- a/test/graph/scc.cpp +++ b/test/graph/scc.cpp @@ -9,11 +9,11 @@ void stress_test() { Graph<NoData, true> g(n); g.erdosRenyi(m); - adj.assign(n, {}); - g.forEdges([](int a, int b){ + vector<vector<int>> adj(n); + g.forEdges([&](int a, int b){ adj[a].push_back(b); }); - scc(); + SCC scc(adj); auto reach = [&](int a) -> vector<bool> { vector<bool> seen(n); @@ -28,12 +28,21 @@ void stress_test() { return seen; }; + vector<int> seen(n); + for (int i = 0; i < ssize(scc.sccs); i++) { + for (int v: scc.sccs[i]) { + if (scc.idx[v] != i) cerr << v << " is in scc " << i << ", but idx[" << v << "] = " << scc.idx[v] << FAIL; + seen[v]++; + } + } + for (int a = 0; a < n; a++) { + if (seen[a] != 1) cerr << a << " occurs " << seen[a] << " times in sccs" << FAIL; vector<bool> reacha = reach(a); for (int b = 0; b < n; b++) { - if (idx[a] == idx[b]) { + if (scc.idx[a] == scc.idx[b]) { if (!reacha[b]) cerr << a << " and " << b << " should be in different SCCs" << FAIL; - } else if (idx[a] < idx[b]) { + } else if (scc.idx[a] < scc.idx[b]) { if (reacha[b]) cerr << a << " should come before " << b << " in topological order" << FAIL; } } @@ -49,16 +58,16 @@ void performance_test() { timer t; Graph<NoData, true> g(N); g.erdosRenyi(M); - adj.assign(N, {}); - g.forEdges([](int a, int b){ + vector<vector<int>> adj(N); + g.forEdges([&](int a, int b){ adj[a].push_back(b); }); t.start(); - scc(); + SCC scc(adj); t.stop(); hash_t hash = 0; - for (int x : idx) hash += x; + for (int x : scc.idx) hash += x; if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/graph/stoerWagner.cpp b/test/graph/stoerWagner.cpp index cc01a7d..3b67aac 100644 --- a/test/graph/stoerWagner.cpp +++ b/test/graph/stoerWagner.cpp @@ -13,7 +13,7 @@ namespace pushRelabel { #include <graph/pushRelabel.cpp> ll minCut() { ll res = INF; - for (int i = 0; i < sz(adj); i++) { + for (int i = 0; i < ssize(adj); i++) { for (int j = 0; j < i; j++) { if (i == j) continue; res = min(res, maxFlow(i, j)); @@ -48,7 +48,7 @@ void stress_test() { ll got = stoerWagner::stoer_wagner(); ll expected = pushRelabel::minCut(); - + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; queries += n; } diff --git a/test/graph/treeIsomorphism.cpp b/test/graph/treeIsomorphism.cpp index e5fd817..1594016 100644 --- a/test/graph/treeIsomorphism.cpp +++ b/test/graph/treeIsomorphism.cpp @@ -45,7 +45,7 @@ void stress_test_eq() { void test_tiny() { vector<int> expected = {1,1,1,1,2,3,6,11,23}; //#A000055 - for (int i = 1; i < sz(expected); i++) { + for (int i = 1; i < ssize(expected); i++) { set<pair<int, int>> got; tree t(i); @@ -63,9 +63,9 @@ void test_tiny() { got.insert(t.treeLabel()); } - if (sz(got) != expected[i]) cerr << 
i << ", got: " << sz(got) << ", expected: " << expected[i] << FAIL; + if (ssize(got) != expected[i]) cerr << i << ", got: " << ssize(got) << ", expected: " << expected[i] << FAIL; } - cerr << "tested tiny: " << sz(expected) << endl; + cerr << "tested tiny: " << ssize(expected) << endl; } void stress_test_neq() { @@ -110,7 +110,7 @@ void performance_test() { tt.adj[b].push_back(a); }); - t.start(); + t.start(); auto [gotA, gotB] = tt.treeLabel(); t.stop(); hash_t hash = gotA + gotB; diff --git a/test/graph/virtualTree.cpp b/test/graph/virtualTree.cpp index af57619..556ba7b 100644 --- a/test/graph/virtualTree.cpp +++ b/test/graph/virtualTree.cpp @@ -21,7 +21,7 @@ int lca(int u, int v) { } void init(vector<vector<int>>& adj) { - int n = (int)sz(adj); + int n = (int)ssize(adj); d.assign(n, 0); in = par = out = d; int counter = 0; @@ -44,7 +44,7 @@ void stress_test() { vector<int> ind = Random::distinct(Random::integer(1, n+1), 0, n); auto [idk, tree] = virtualTree(ind); vector<pair<int, int>> edges; - for (int i=0; i<sz(idk); i++) for (int v : tree[i]) { + for (int i=0; i<ssize(idk); i++) for (int v : tree[i]) { edges.emplace_back(idk[i], idk[v]); } @@ -60,7 +60,7 @@ void stress_test() { }; dfs(dfs, 0, -1, -1); - sort(all(edges)), sort(all(edges2)); + ranges::sort(edges), ranges::sort(edges2); if (edges != edges2) cerr << "WA edge list does not match" << FAIL; } cerr << "tested random 50'000 tests" << endl; @@ -83,7 +83,7 @@ void performance_test() { ll hash = 0; t.start(); auto [idk, tree] = virtualTree(ind); - hash = accumulate(all(idk), 0LL); + hash = accumulate(begin(idk), end(idk), 0LL); t.stop(); if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; diff --git a/test/math/berlekampMassey.cpp b/test/math/berlekampMassey.cpp index c7e83fc..93832b0 100644 --- a/test/math/berlekampMassey.cpp +++ b/test/math/berlekampMassey.cpp @@ -12,10 +12,10 @@ struct RandomRecurence { } ll operator()(ll k){ - while (sz(cache) <= k) { + while (ssize(cache) <= k) { ll cur = 0; - for (ll i = 0; i < sz(c); i++) { - cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + for (ll i = 0; i < ssize(c); i++) { + cur += (c[i] * cache[ssize(cache) - i - 1]) % mod; } cur %= mod; cache.push_back(cur); @@ -60,7 +60,7 @@ void performance_test() { cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } - + int main() { stress_test(); if (!sanitize) performance_test(); diff --git a/test/math/bigint.cpp b/test/math/bigint.cpp index 53e18dd..2d75343 100644 --- a/test/math/bigint.cpp +++ b/test/math/bigint.cpp @@ -9,7 +9,7 @@ struct modInt { stringstream a; a << x; string b = a.str(); - for (ll i = b[0] == '-' ? 1 : 0; i < sz(b); i++) { + for (ll i = b[0] == '-' ? 
1 : 0; i < ssize(b); i++) { value *= 10; value += b[i] - '0'; value %= MOD; @@ -115,7 +115,7 @@ void stress_test(int LIM) { } cerr << "tested random queries: " << queries << endl; } - + int main() { stress_test(100); if (!sanitize) stress_test(1000); diff --git a/test/math/binomial0.cpp b/test/math/binomial0.cpp index 00c04d4..17a5f91 100644 --- a/test/math/binomial0.cpp +++ b/test/math/binomial0.cpp @@ -4,17 +4,16 @@ constexpr ll mod = 1'394'633'899; #include <math/binomial0.cpp> - void stress_test() { vector<ll> last = {1}; ll queries = 0; for (ll i = 0; i < 10'000; i++) { for (ll j = 0; j <= i; j++) { - ll got = calc_binom(i, j); + ll got = binom(i, j); ll expected = last[j]; - if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + if (got != expected) cerr << "binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; } - queries += sz(last); + queries += ssize(last); last.push_back(1); for (ll j = i; j > 0; j--) { diff --git a/test/math/binomial1.cpp b/test/math/binomial1.cpp index f6fe20b..3a7b291 100644 --- a/test/math/binomial1.cpp +++ b/test/math/binomial1.cpp @@ -7,11 +7,11 @@ void stress_test() { ll queries = 0; for (ll i = 0; i <= 61; i++) { for (ll j = 0; j <= i; j++) { - ll got = calc_binom(i, j); + ll got = binom(i, j); ll expected = last[j]; - if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + if (got != expected) cerr << "binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; } - queries += sz(last); + queries += ssize(last); last.push_back(1); for (ll j = i; j > 0; j--) { diff --git a/test/math/binomial2.cpp b/test/math/binomial2.cpp index b55c8af..ce6a07c 100644 --- a/test/math/binomial2.cpp +++ b/test/math/binomial2.cpp @@ -8,11 +8,11 @@ void stress_test() { ll queries = 0; for (ll i = 0; i <= 1000; i++) { for (ll j = 0; j <= i; j++) { - ll got = calc_binom(i, j); + ll got = binom(i, j); ll expected = last[j]; - if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + if (got != expected) cerr << "binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; } - queries += sz(last); + queries += ssize(last); last.push_back(1); for (ll j = i; j > 0; j--) { diff --git a/test/math/binomial3.cpp b/test/math/binomial3.cpp index 4a99689..eaca24e 100644 --- a/test/math/binomial3.cpp +++ b/test/math/binomial3.cpp @@ -11,11 +11,11 @@ void stress_test() { ll queries = 0; for (ll i = 0; i < mod; i++) { for (ll j = 0; j <= i; j++) { - ll got = calc_binom(i, j, mod); + ll got = binom(i, j, mod); ll expected = last[j]; - if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + if (got != expected) cerr << "binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; } - queries += sz(last); + queries += ssize(last); last.push_back(1); for (ll j = i; j > 0; j--) { diff --git a/test/math/cycleDetection.cpp b/test/math/cycleDetection.cpp index 09480b1..2ba3525 100644 --- a/test/math/cycleDetection.cpp +++ b/test/math/cycleDetection.cpp @@ -1,5 +1,4 @@ #include "../util.h" -#include <datastructures/pbds.cpp> #include <math/cycleDetection.cpp> pair<ll, ll> naive(ll x0, function<ll(ll)> f) { diff --git a/test/math/gauss.cpp b/test/math/gauss.cpp index 167aa62..21a5736 100644 --- a/test/math/gauss.cpp +++ 
b/test/math/gauss.cpp @@ -7,10 +7,10 @@ vector<vector<double>> mat; #include <math/gauss.cpp> vector<vector<double>> inverseMat(const vector<vector<double>>& m) { - int n = sz(m); + int n = ssize(m); mat = m; for (int i = 0; i < n; i++) { - if (sz(mat[i]) != n) cerr << "error: no square matrix" << FAIL; + if (ssize(mat[i]) != n) cerr << "error: no square matrix" << FAIL; mat[i].resize(2*n); mat[i][n+i] = 1; } @@ -27,10 +27,10 @@ vector<vector<double>> inverseMat(const vector<vector<double>>& m) { } vector<vector<double>> mul(const vector<vector<double>>& a, const vector<vector<double>>& b) { - int n = sz(a); - int m = sz(b[0]); - int x = sz(b); - if (sz(a[0]) != sz(b)) cerr << "error: wrong dimensions" << FAIL; + int n = ssize(a); + int m = ssize(b[0]); + int x = ssize(b); + if (ssize(a[0]) != ssize(b)) cerr << "error: wrong dimensions" << FAIL; vector<vector<double>> res(n, vector<double>(m)); for (int i = 0; i < n; i++) { for (int j = 0; j < m; j++) { @@ -48,21 +48,21 @@ void test_tiny() { {0, 5, 6, 7}, {0, 0, 8, 9}, }; - if (gauss(sz(mat)) != UNIQUE) cerr << "error: 1" << FAIL; + if (gauss(ssize(mat)) != UNIQUE) cerr << "error: 1" << FAIL; mat = { {-1, 1, 0, -1}, { 2, 6, 0, 10}, { 1, -2, 0, 0}, }; - if (gauss(sz(mat)) != MULTIPLE) cerr << "error: 2" << FAIL; + if (gauss(ssize(mat)) != MULTIPLE) cerr << "error: 2" << FAIL; mat = { {-1, 1, 0, -1}, { 2, 6, 0, 10}, { 1, -2, 0, 1}, }; - if (gauss(sz(mat)) != INCONSISTENT) cerr << "error: 3" << FAIL; + if (gauss(ssize(mat)) != INCONSISTENT) cerr << "error: 3" << FAIL; } void stress_test_inv() { diff --git a/test/math/inversions.cpp b/test/math/inversions.cpp index 42ab343..86d87d0 100644 --- a/test/math/inversions.cpp +++ b/test/math/inversions.cpp @@ -1,10 +1,9 @@ #include "../util.h" -#include <datastructures/pbds.cpp> #include <math/inversions.cpp> ll naive(const vector<ll>& v) { ll res = 0; - for (ll i = 0; i < sz(v); i++) { + for (ll i = 0; i < ssize(v); i++) { for (ll j = 0; j < i; j++) { if (v[j] > v[i]) res++; } diff --git a/test/math/inversionsMerge.cpp b/test/math/inversionsMerge.cpp index 2492af4..ab1c62e 100644 --- a/test/math/inversionsMerge.cpp +++ b/test/math/inversionsMerge.cpp @@ -3,7 +3,7 @@ ll naive(const vector<ll>& v) { ll res = 0; - for (ll i = 0; i < sz(v); i++) { + for (ll i = 0; i < ssize(v); i++) { for (ll j = 0; j < i; j++) { if (v[j] > v[i]) res++; } @@ -17,7 +17,7 @@ void stress_test() { int n = Random::integer<int>(1, 100); vector<ll> v(n); for (ll j = 0; j < n; j++) v[j] = (j-10) * 100000 + Random::integer<ll>(0, 10000); //values must be unique ): - shuffle(all(v), Random::rng); + ranges::shuffle(v, Random::rng); ll expected = naive(v); ll got = mergeSort(v); if (got != expected) { diff --git a/test/math/kthperm.cpp b/test/math/kthperm.cpp index 1bf8db3..ca95699 100644 --- a/test/math/kthperm.cpp +++ b/test/math/kthperm.cpp @@ -1,5 +1,4 @@ #include "../util.h" -#include <datastructures/pbds.cpp> #include <math/kthperm.cpp> void stress_test(int LIM) { @@ -7,13 +6,13 @@ void stress_test(int LIM) { for (int i = 0; i < LIM; i++) { int n = Random::integer<int>(1, 100); vector<ll> expected(n); - iota(all(expected), 0); + iota(begin(expected), end(expected), 0); ll k = 0; do { auto got = kthperm(n, k); if (got != expected) cerr << "error" << FAIL; k++; - } while (k < 100 && next_permutation(all(expected))); + } while (k < 100 && ranges::next_permutation(expected).found); queries += n; } cerr << "tested queries: " << queries << endl; diff --git a/test/math/kthperm_permIndex.cpp b/test/math/kthperm_permIndex.cpp index 
d84524e..5e05c73 100644 --- a/test/math/kthperm_permIndex.cpp +++ b/test/math/kthperm_permIndex.cpp @@ -1,5 +1,4 @@ #include "../util.h" -#include <datastructures/pbds.cpp> #include <math/kthperm.cpp> #include <math/permIndex.cpp> diff --git a/test/math/lgsFp.cpp b/test/math/lgsFp.cpp index 6db586a..d8967a0 100644 --- a/test/math/lgsFp.cpp +++ b/test/math/lgsFp.cpp @@ -1,6 +1,5 @@ #include "../util.h" #include <math/shortModInv.cpp> -vector<vector<ll>> mat; constexpr ll mod = 1'000'000'007; namespace lgs { #include <math/lgsFp.cpp> @@ -8,30 +7,26 @@ namespace lgs { vector<vector<ll>> inverseMat(const vector<vector<ll>>& m) { - int n = sz(m); - mat = m; + int n = ssize(m); + vector<vector<ll>> mat = m; for (int i = 0; i < n; i++) { - if (sz(mat[i]) != n) cerr << "error: no square matrix" << FAIL; + if (ssize(mat[i]) != n) cerr << "error: no square matrix" << FAIL; mat[i].resize(2*n); mat[i][n+i] = 1; } - lgs::gauss(sz(mat), sz(mat[0])); - vector<vector<ll>> res(m); + vector<int> pivots = lgs::gauss(mat); for (int i = 0; i < n; i++) { - res[i] = vector<ll>(mat[i].begin() + n, mat[i].end()); - for (int j = 0; j < n; j++) { - if (j != i && mat[i][j] != 0) cerr << "error: not full rank?" << FAIL; - if (j == i && mat[i][j] != 1) cerr << "error: not full rank?" << FAIL; - } + if (pivots[i] != i) cerr << "error: not full rank?" << FAIL; + mat[i].erase(begin(mat[i]), begin(mat[i]) + n); } - return res; + return mat; } vector<vector<ll>> mul(const vector<vector<ll>>& a, const vector<vector<ll>>& b) { - int n = sz(a); - int m = sz(b[0]); - int x = sz(b); - if (sz(a[0]) != sz(b)) cerr << "error: wrong dimensions" << FAIL; + int n = ssize(a); + int m = ssize(b[0]); + int x = ssize(b); + if (ssize(a[0]) != ssize(b)) cerr << "error: wrong dimensions" << FAIL; vector<vector<ll>> res(n, vector<ll>(m)); for (int i = 0; i < n; i++) { for (int j = 0; j < m; j++) { @@ -53,18 +48,17 @@ void test_square() { vector<vector<ll>> m(n); for (auto& v : m) v = Random::integers<ll>(n, 0, mod); - mat = m; - lgs::gauss(sz(mat), sz(mat[0])); + lgs::gauss(m); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { - hash += mat[i][j]; + hash += m[i][j]; } } queries += n; } - cerr << "tested sqaures: " << queries << " (hash: " << hash << ")" << endl;; + cerr << "tested squares: " << queries << " (hash: " << hash << ")" << endl;; } void stress_test_inv() { @@ -82,8 +76,7 @@ void stress_test_inv() { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { - if (i == j && prod[i][j] != 1) cerr << "error: not inverted" << FAIL; - if (i != j && prod[i][j] != 0) cerr << "error: not inverted" << FAIL; + if (prod[i][j] != (i == j)) cerr << "error: not inverted" << FAIL; } } @@ -98,15 +91,14 @@ void performance_test() { vector<vector<ll>> m(N); for (auto& v : m) v = Random::integers<ll>(N, 0, mod); - mat = m; t.start(); - lgs::gauss(sz(mat), sz(mat[0])); + lgs::gauss(m); t.stop(); hash_t hash = 0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { - hash += mat[i][j]; + hash += m[i][j]; } } if (t.time > 500) cerr << "too slow: " << t.time << FAIL; diff --git a/test/math/linearRecurrence.cpp b/test/math/linearRecurrence.cpp index 977221e..a2ac01d 100644 --- a/test/math/linearRecurrence.cpp +++ b/test/math/linearRecurrence.cpp @@ -6,16 +6,15 @@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) { return mulSlow(a, b); } - struct RandomRecurence { vector<ll> f, c, cache; RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {} ll operator()(ll k){ - while 
(sz(cache) <= k) { + while (ssize(cache) <= k) { ll cur = 0; - for (ll i = 0; i < sz(c); i++) { - cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + for (ll i = 0; i < ssize(c); i++) { + cur += (c[i] * cache[ssize(cache) - i - 1]) % mod; } cur %= mod; cache.push_back(cur); diff --git a/test/math/linearRecurrenceNTT.cpp b/test/math/linearRecurrenceNTT.cpp index ca7e29e..f7615d6 100644 --- a/test/math/linearRecurrenceNTT.cpp +++ b/test/math/linearRecurrenceNTT.cpp @@ -12,10 +12,10 @@ struct RandomRecurence { RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {} ll operator()(ll k){ - while (sz(cache) <= k) { + while (ssize(cache) <= k) { ll cur = 0; - for (ll i = 0; i < sz(c); i++) { - cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + for (ll i = 0; i < ssize(c); i++) { + cur += (c[i] * cache[ssize(cache) - i - 1]) % mod; } cur %= mod; cache.push_back(cur); diff --git a/test/math/linearRecurrenceOld.cpp b/test/math/linearRecurrenceOld.cpp index 6435d5a..b3d1611 100644 --- a/test/math/linearRecurrenceOld.cpp +++ b/test/math/linearRecurrenceOld.cpp @@ -6,10 +6,10 @@ struct RandomRecurence { RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {} ll operator()(ll k){ - while (sz(cache) <= k) { + while (ssize(cache) <= k) { ll cur = 0; - for (ll i = 0; i < sz(c); i++) { - cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + for (ll i = 0; i < ssize(c); i++) { + cur += (c[i] * cache[ssize(cache) - i - 1]) % mod; } cur %= mod; cache.push_back(cur); diff --git a/test/math/linearSieve.cpp b/test/math/linearSieve.cpp index fbed4b5..1a5286f 100644 --- a/test/math/linearSieve.cpp +++ b/test/math/linearSieve.cpp @@ -57,7 +57,7 @@ void performance_test() { timer t; t.start(); sieve(); - hash_t hash = sz(primes); + hash_t hash = ssize(primes); t.stop(); if (!sanitize) { if (t.time > 500) cerr << "too slow: " << t.time << FAIL; diff --git a/test/math/longestIncreasingSubsequence.cpp b/test/math/longestIncreasingSubsequence.cpp index d08cf57..5bc3936 100644 --- a/test/math/longestIncreasingSubsequence.cpp +++ b/test/math/longestIncreasingSubsequence.cpp @@ -9,7 +9,7 @@ constexpr ll INF = LL::INF; template<bool STRICT> bool isLis(const vector<ll>& a, const vector<int>& lis) { - for (int i = 1; i < sz(lis); i++) { + for (int i = 1; i < ssize(lis); i++) { if (lis[i-1] >= lis[i]) return false; if (a[lis[i-1]] > a[lis[i]]) return false; if (STRICT && a[lis[i-1]] == a[lis[i]]) return false; @@ -20,12 +20,12 @@ bool isLis(const vector<ll>& a, const vector<int>& lis) { template<bool STRICT> vector<int> naive(const vector<ll>& a) { vector<int> res; - for (ll i = 1; i < (1ll << sz(a)); i++) { + for (ll i = 1; i < (1ll << ssize(a)); i++) { vector<int> tmp; - for (ll j = 0; j < sz(a); j++) { + for (ll j = 0; j < ssize(a); j++) { if (((i >> j) & 1) != 0) tmp.push_back(j); } - if (sz(tmp) >= sz(res) && isLis<STRICT>(a, tmp)) res = tmp; + if (ssize(tmp) >= ssize(res) && isLis<STRICT>(a, tmp)) res = tmp; } return res; } @@ -56,10 +56,9 @@ void performance_test() { timer t; auto a = Random::integers<ll>(N, -10'000, 10'000); auto b = Random::integers<ll>(N, -10'000, 10'000); - sort(all(b)); + ranges::sort(b); auto c = Random::integers<ll>(N, -10'000, 10'000); - sort(all(c)); - reverse(all(c)); + ranges::sort(c | views::reverse); hash_t hash = 0; t.start(); hash += lis(a).size(); diff --git a/test/math/matrixPower.cpp b/test/math/matrixPower.cpp index b1d6783..083dded 100644 --- a/test/math/matrixPower.cpp +++ 
b/test/math/matrixPower.cpp @@ -7,15 +7,15 @@ struct mat { mat(int dim = 0, int diag = 1) : m(dim, vector<ll>(dim)) { for (int i = 0; i < dim; i++) m[i][i] = diag; } - mat(const vector<ll> c) : m(sz(c), vector<ll>(sz(c))) { + mat(const vector<ll> c) : m(ssize(c), vector<ll>(ssize(c))) { m[0] = c; - for (ll i = 1; i < sz(c); i++) { + for (ll i = 1; i < ssize(c); i++) { m[i][i-1] = 1; } } mat operator*(const mat& o) const { - int dim = sz(m); + int dim = ssize(m); mat res(dim, 0); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { @@ -29,7 +29,7 @@ struct mat { } vector<ll> operator*(const vector<ll>& o) const { - int dim = sz(m); + int dim = ssize(m); vector<ll> res(dim); for (int i = 0; i < dim; i++) { for (int j = 0; j < dim; j++) { @@ -48,10 +48,10 @@ struct RandomRecurence { RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {} ll operator()(ll k){ - while (sz(cache) <= k) { + while (ssize(cache) <= k) { ll cur = 0; - for (ll i = 0; i < sz(c); i++) { - cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + for (ll i = 0; i < ssize(c); i++) { + cur += (c[i] * cache[ssize(cache) - i - 1]) % mod; } cur %= mod; cache.push_back(cur); @@ -67,13 +67,13 @@ void stress_test() { RandomRecurence f(n); precalc(mat(f.c)); auto tmp = f.f; - reverse(all(tmp)); + ranges::reverse(tmp); for (int j = 0; j < 100; j++) { ll k = Random::integer<ll>(0, 1000); vector<ll> got = calc(k, tmp); - vector<ll> expected(sz(f.f)); + vector<ll> expected(ssize(f.f)); for (ll l = 0; l < n; l++) expected[n - 1 - l] = f(k + l); if (got != expected) cerr << "error" << FAIL; @@ -89,7 +89,7 @@ void performance_test() { timer t; RandomRecurence f(N); auto tmp = f.f; - reverse(all(tmp)); + ranges::reverse(tmp); t.start(); precalc(mat(f.c)); diff --git a/test/math/millerRabin.base32.cpp b/test/math/millerRabin.base32.cpp index 069f125..e9a4b57 100644 --- a/test/math/millerRabin.base32.cpp +++ b/test/math/millerRabin.base32.cpp @@ -95,7 +95,7 @@ void extra_tests() { t.start(); auto got = isPrime(x); t.stop(); - bool expected = sz(factors) == 1 && factors.begin()->second == 1; + bool expected = ssize(factors) == 1 && factors.begin()->second == 1; if (got != expected) cerr << "error: " << x << FAIL; } if (t.time > 10) cerr << "too slow" << FAIL; diff --git a/test/math/millerRabin.cpp b/test/math/millerRabin.cpp index 18fad40..e7feba1 100644 --- a/test/math/millerRabin.cpp +++ b/test/math/millerRabin.cpp @@ -87,7 +87,7 @@ void extra_tests() { t.start(); auto got = isPrime(x); t.stop(); - bool expected = sz(factors) == 1 && factors.begin()->second == 1; + bool expected = ssize(factors) == 1 && factors.begin()->second == 1; if (got != expected) cerr << "error: " << x << FAIL; } if (t.time > 10) cerr << "too slow" << FAIL; diff --git a/test/math/modExp.cpp b/test/math/modExp.cpp deleted file mode 100644 index 4d2b4b2..0000000 --- a/test/math/modExp.cpp +++ /dev/null @@ -1,42 +0,0 @@ -#include "../util.h" -#include <math/modExp.cpp> - -void stress_test() { - ll queries = 0; - for (ll i = 0; i < 10'000; i++) { - int a = Random::integer<int>(1, 100); - int n = Random::integer<int>(2, 100); - ll expected = 1; - ll k = 0; - do { - auto got = powMod(a, k, n); - if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; - k++; - expected = (expected * a) % n; - } while (k < 100); - queries += n; - } - cerr << "tested queries: " << queries << endl; -} - -constexpr int N = 1'000'000; -void performance_test() { - timer t; - hash_t hash = 0; - for (int 
operations = 0; operations < N; operations++) { - ll a = Random::integer<ll>(0, 1'000'000'000); - ll b = Random::integer<ll>(0, 1'000'000'000); - ll n = Random::integer<ll>(2, 1'000'000'000); - t.start(); - hash += powMod(a, b, n); - t.stop(); - } - if (t.time > 750) cerr << "too slow: " << t.time << FAIL; - cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; -} - -int main() { - stress_test(); - if (!sanitize) performance_test(); -} - diff --git a/test/math/permIndex.cpp b/test/math/permIndex.cpp index 8dcfd6b..2f19985 100644 --- a/test/math/permIndex.cpp +++ b/test/math/permIndex.cpp @@ -1,5 +1,4 @@ #include "../util.h" -#include <datastructures/pbds.cpp> #include <math/permIndex.cpp> void stress_test(int LIM) { @@ -7,13 +6,13 @@ void stress_test(int LIM) { for (int i = 0; i < LIM; i++) { int n = Random::integer<int>(1, 100); vector<ll> cur(n); - iota(all(cur), 0); + iota(begin(cur), end(cur), 0); ll expected = 0; do { auto got = permIndex(cur); if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; expected++; - } while (expected < 100 && next_permutation(all(cur))); + } while (expected < 100 && ranges::next_permutation(cur).found); queries += n; } cerr << "tested queries: " << queries << endl; @@ -23,7 +22,7 @@ constexpr int N = 500'000; void performance_test() { timer t; vector<ll> cur(N); - iota(all(cur), 0); + iota(begin(cur), end(cur), 0); reverse(cur.end() - 10, cur.end()); t.start(); auto hash = permIndex(cur); diff --git a/test/math/polynomial.cpp b/test/math/polynomial.cpp index f4a9486..adf3773 100644 --- a/test/math/polynomial.cpp +++ b/test/math/polynomial.cpp @@ -11,7 +11,7 @@ poly randomPoly(int deg) { ll eval(const vector<ll>& p, ll x) { ll res = 0; - for (ll i = 0, j = 1; i < sz(p); i++, j = (j * x) % mod) { + for (ll i = 0, j = 1; i < ssize(p); i++, j = (j * x) % mod) { res += j * p[i]; res %= mod; } @@ -50,7 +50,7 @@ void test_add() { auto c = a; c += b; - if (sz(c) > sz(a) && sz(c) > sz(b)) cerr << "error: wrong degree" << FAIL; + if (ssize(c) > ssize(a) && ssize(c) > ssize(b)) cerr << "error: wrong degree" << FAIL; for (int i = 0; i <= n + m + 7; i++) { ll x = Random::integer<ll>(0, mod); @@ -74,7 +74,7 @@ void test_mul() { auto b = randomPoly(m); auto c = a * b; - if (sz(c) > sz(a) + sz(b) - 1) cerr << "error: wrong degree" << FAIL; + if (ssize(c) > ssize(a) + ssize(b) - 1) cerr << "error: wrong degree" << FAIL; for (int i = 0; i <= n + m + 7; i++) { ll x = Random::integer<ll>(0, mod); @@ -97,8 +97,8 @@ void test_shift() { auto a = randomPoly(n); auto b = a << m; - if (sz(b) > sz(a)) cerr << sz(a) << " " << sz(b) << endl; - if (sz(b) > sz(a)) cerr << "error: wrong degree" << FAIL; + if (ssize(b) > ssize(a)) cerr << ssize(a) << " " << ssize(b) << endl; + if (ssize(b) > ssize(a)) cerr << "error: wrong degree" << FAIL; for (int i = 0; i <= n + 7; i++) { ll x = Random::integer<ll>(0, mod); @@ -126,8 +126,8 @@ void test_divmod() { auto b = randomPoly(m); auto [div, rem] = a.divmod(b); - if (sz(rem) > sz(b)) cerr << "error: wrong degree (rem)" << FAIL; - if (sz(div) > 1 + max<ll>(0, sz(a) - sz(b))) cerr << "error: wrong degree (div)" << FAIL; + if (ssize(rem) > ssize(b)) cerr << "error: wrong degree (rem)" << FAIL; + if (ssize(div) > 1 + max<ll>(0, ssize(a) - ssize(b))) cerr << "error: wrong degree (div)" << FAIL; for (int i = 0; i <= n + m; i++) { ll x = Random::integer<ll>(0, mod); @@ -142,7 +142,7 @@ void test_divmod() { } cerr << "tested divmod: " << queries << endl; } - + int main() { test_eval(); test_add(); 
diff --git a/test/math/primeSieve.cpp b/test/math/primeSieve.cpp index a675f6a..52570e2 100644 --- a/test/math/primeSieve.cpp +++ b/test/math/primeSieve.cpp @@ -18,7 +18,7 @@ void stress_test() { if (got) found.push_back(i); queries++; } - primes.resize(sz(found)); + primes.resize(ssize(found)); if (primes != found) cerr << "error: primes" << FAIL; for (int i = 0; i < 1'000'000; i++) { ll x = Random::integer<ll>(2, N); @@ -34,7 +34,7 @@ void performance_test() { timer t; t.start(); primeSieve(); - hash_t hash = sz(primes); + hash_t hash = ssize(primes); t.stop(); if (!sanitize) { if (t.time > 500) cerr << "too slow: " << t.time << FAIL; diff --git a/test/math/primitiveRoot.cpp b/test/math/primitiveRoot.cpp index cd0b388..6ad7429 100644 --- a/test/math/primitiveRoot.cpp +++ b/test/math/primitiveRoot.cpp @@ -63,7 +63,7 @@ void stress_test2() { map<ll, int> facts; factor(x, facts); if (x % 2 == 0) facts.erase(facts.find(2)); - bool expected = sz(facts) == 1; + bool expected = ssize(facts) == 1; if (x % 4 == 0) expected = false; if (x == 2 || x == 4) expected = true; diff --git a/test/math/shortModInv.cpp b/test/math/shortModInv.cpp index 0e91900..5e74907 100644 --- a/test/math/shortModInv.cpp +++ b/test/math/shortModInv.cpp @@ -7,7 +7,7 @@ void stress_test() { ll n = Random::integer<ll>(2, 1'000'000'000); ll x = 0; do { - x = Random::integer<ll>(0, n); + x = Random::integer<ll>(0, 1'000'000'000); } while (gcd(x, n) != 1); ll y = multInv(x, n); ll got = (x*y) % n; diff --git a/test/math/transforms/fft.cpp b/test/math/transforms/fft.cpp index 35f7e15..aa7ddd2 100644 --- a/test/math/transforms/fft.cpp +++ b/test/math/transforms/fft.cpp @@ -2,14 +2,14 @@ #include <math/transforms/fft.cpp> vector<cplx> to_cplx(const vector<ll>& in) { - vector<cplx> res(sz(in)); - for (int i = 0; i < sz(in); i++) res[i] = in[i]; + vector<cplx> res(ssize(in)); + for (int i = 0; i < ssize(in); i++) res[i] = in[i]; return res; } vector<ll> from_cplx(const vector<cplx>& in) { - vector<ll> res(sz(in)); - for (int i = 0; i < sz(in); i++) res[i] = llround(real(in[i])); + vector<ll> res(ssize(in)); + for (int i = 0; i < ssize(in); i++) res[i] = llround(real(in[i])); return res; } diff --git a/test/math/transforms/fftMul.cpp b/test/math/transforms/fftMul.cpp index 72fd4d8..38e7c73 100644 --- a/test/math/transforms/fftMul.cpp +++ b/test/math/transforms/fftMul.cpp @@ -5,21 +5,21 @@ #include <math/transforms/fftMul.cpp> vector<ll> from_cplx(const vector<cplx>& in) { - vector<ll> res(sz(in)); - for (int i = 0; i < sz(in); i++) res[i] = llround(real(in[i])); + vector<ll> res(ssize(in)); + for (int i = 0; i < ssize(in); i++) res[i] = llround(real(in[i])); return res; } vector<ll> naive(const vector<ll>& a, const vector<ll>& b) { vector<ll> res; for (ll i = 1;; i *= 2) { - if (sz(a) + sz(b) <= i) { + if (ssize(a) + ssize(b) <= i) { res.resize(i, 0); break; } } - for (int i = 0; i < sz(a); i++) { - for (int j = 0; j < sz(b); j++) { + for (int i = 0; i < ssize(a); i++) { + for (int j = 0; j < ssize(b); j++) { res[i+j] += a[i] * b[j]; } } diff --git a/test/math/transforms/multiplyBitwise.cpp b/test/math/transforms/multiplyBitwise.cpp index e89ba4e..f460204 100644 --- a/test/math/transforms/multiplyBitwise.cpp +++ b/test/math/transforms/multiplyBitwise.cpp @@ -6,13 +6,13 @@ vector<ll> naive(const vector<ll>& a, const vector<ll>& b) { vector<ll> res; for (ll i = 1;; i *= 2) { - if (sz(a) <= i && sz(b) <= i) { + if (ssize(a) <= i && ssize(b) <= i) { res.resize(i, 0); break; } } - for (int i = 0; i < sz(a); i++) { - for (int j = 0; j < 
sz(b); j++) { + for (int i = 0; i < ssize(a); i++) { + for (int j = 0; j < ssize(b); j++) { res[i&j] += a[i] * b[j]; } } diff --git a/test/math/transforms/multiplyFFT.cpp b/test/math/transforms/multiplyFFT.cpp index a54020c..f11ec45 100644 --- a/test/math/transforms/multiplyFFT.cpp +++ b/test/math/transforms/multiplyFFT.cpp @@ -6,13 +6,13 @@ vector<ll> naive(const vector<ll>& a, const vector<ll>& b) { vector<ll> res; for (ll i = 1;; i *= 2) { - if (sz(a) + sz(b) <= i) { + if (ssize(a) + ssize(b) <= i) { res.resize(i, 0); break; } } - for (int i = 0; i < sz(a); i++) { - for (int j = 0; j < sz(b); j++) { + for (int i = 0; i < ssize(a); i++) { + for (int j = 0; j < ssize(b); j++) { res[i+j] += a[i] * b[j]; } } diff --git a/test/math/transforms/multiplyNTT.cpp b/test/math/transforms/multiplyNTT.cpp index 90c606a..48a1aa3 100644 --- a/test/math/transforms/multiplyNTT.cpp +++ b/test/math/transforms/multiplyNTT.cpp @@ -6,13 +6,13 @@ vector<ll> naive(const vector<ll>& a, const vector<ll>& b) { vector<ll> res; for (ll i = 1;; i *= 2) { - if (sz(a) + sz(b) <= i) { + if (ssize(a) + ssize(b) <= i) { res.resize(i, 0); break; } } - for (int i = 0; i < sz(a); i++) { - for (int j = 0; j < sz(b); j++) { + for (int i = 0; i < ssize(a); i++) { + for (int j = 0; j < ssize(b); j++) { res[i+j] += a[i] * b[j]; res[i+j] %= mod; } diff --git a/test/math/transforms/seriesOperations.cpp b/test/math/transforms/seriesOperations.cpp index 29c91c7..1242537 100644 --- a/test/math/transforms/seriesOperations.cpp +++ b/test/math/transforms/seriesOperations.cpp @@ -24,7 +24,7 @@ namespace reference {//checked against yosupo } vector<ll> poly_deriv(vector<ll> a){ - for(int i = 0; i < sz(a)-1; i++) + for(int i = 0; i < ssize(a)-1; i++) a[i] = a[i+1] * (i+1) % mod; a.pop_back(); return a; @@ -32,8 +32,8 @@ namespace reference {//checked against yosupo vector<ll> poly_integr(vector<ll> a){ if(a.empty()) return {0}; - a.push_back(a.back() * powMod(sz(a), mod-2, mod) % mod); - for(int i = sz(a)-2; i > 0; i--) + a.push_back(a.back() * powMod(ssize(a), mod-2, mod) % mod); + for(int i = ssize(a)-2; i > 0; i--) a[i] = a[i-1] * powMod(i, mod-2, mod) % mod; a[0] = 0; return a; @@ -51,7 +51,7 @@ namespace reference {//checked against yosupo for(int len = 1; len < n; len *= 2){ vector<ll> p = poly_log(q, 2*len); for(int i = 0; i < 2*len; i++) - p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod; + p[i] = (mod - p[i] + (i < ssize(a) ? a[i] : 0)) % mod; vector<ll> q2 = q; q2.resize(2*len); ntt(p), ntt(q2); diff --git a/test/missing.ignore b/test/missing.ignore new file mode 100644 index 0000000..c5f97bc --- /dev/null +++ b/test/missing.ignore @@ -0,0 +1,7 @@ +datastructures/pbds.cpp +other/pragmas.cpp +other/stuff.cpp +other/timed.cpp +tests/gcc5bug.cpp +tests/precision.cpp +tests/whitespace.cpp diff --git a/test/other/bitOps.cpp b/test/other/bitOps.cpp index 707c3f0..adaa49a 100644 --- a/test/other/bitOps.cpp +++ b/test/other/bitOps.cpp @@ -31,9 +31,7 @@ ll naive(ll x) { bits.push_back(x & 1); x >>= 1; } - reverse(all(bits)); - next_permutation(all(bits)); - reverse(all(bits)); + ranges::next_permutation(bits | views::reverse); x = 0; for (ll i = 0, j = 1; i < 63; i++, j <<= 1) { if (bits[i] != 0) x |= j; @@ -56,4 +54,4 @@ void test_nextPerm() { int main() { test_subsets(); test_nextPerm(); -}
\ No newline at end of file +} diff --git a/test/other/josephus2.cpp b/test/other/josephus2.cpp index 21154a1..074b481 100644 --- a/test/other/josephus2.cpp +++ b/test/other/josephus2.cpp @@ -4,8 +4,8 @@ template<ll O> ll naive(ll n, ll k) { vector<ll> state(n); - iota(all(state), O); - for (ll i = k-1; state.size() > 1; i = (i + k - 1) % sz(state)) { + iota(begin(state), end(state), O); + for (ll i = k-1; state.size() > 1; i = (i + k - 1) % ssize(state)) { state.erase(state.begin() + i); } return state[0]; @@ -15,7 +15,7 @@ void stress_test() { ll tests = 0; for (ll i = 1; i < 2'000; i++) { auto got = rotateLeft(i); - auto expected = naive<1>(i, 2); + auto expected = naive<0>(i, 2); if (got != expected) cerr << "error: " << i << FAIL; tests++; } diff --git a/test/other/josephusK.cpp b/test/other/josephusK.cpp index b6680b8..dab291b 100644 --- a/test/other/josephusK.cpp +++ b/test/other/josephusK.cpp @@ -5,8 +5,8 @@ template<ll O> ll naive(ll n, ll k) { vector<ll> state(n); - iota(all(state), O); - for (ll i = k-1; state.size() > 1; i = (i + k - 1) % sz(state)) { + iota(begin(state), end(state), O); + for (ll i = k-1; state.size() > 1; i = (i + k - 1) % ssize(state)) { state.erase(state.begin() + i); } return state[0]; diff --git a/test/other/pbs.cpp b/test/other/pbs.cpp index 4fa4470..4a8da2e 100644 --- a/test/other/pbs.cpp +++ b/test/other/pbs.cpp @@ -49,7 +49,7 @@ void stress_test() { for (int i=1; i<n; i++) { edges.emplace_back(Random::integer(0, i), i); } - shuffle(all(edges), Random::rng); + ranges::shuffle(edges, Random::rng); queries.clear(); for (int i=0; i<Q; i++) { auto x = Random::distinct(2, n); @@ -80,7 +80,7 @@ void performance_test() { for (int i=1; i<n; i++) { edges.emplace_back(Random::integer(0, i), i); } - shuffle(all(edges), Random::rng); + ranges::shuffle(edges, Random::rng); queries.clear(); for (int i=0; i<Q; i++) { auto x = Random::distinct(2, n); @@ -91,7 +91,7 @@ void performance_test() { t.start(); vector<int> ans = pbs(Q, MAX_OPERATIONS); t.stop(); - ll hash = accumulate(all(ans), 0LL); + ll hash = accumulate(begin(ans), end(ans), 0LL); if (t.time > 900) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; diff --git a/test/other/sos.cpp b/test/other/sos.cpp deleted file mode 100644 index 52b55ed..0000000 --- a/test/other/sos.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include "../util.h" - -vector<ll> sos(const vector<ll>& in) { - #include <other/sos.cpp> - return res; -} - -vector<ll> naive(const vector<ll>& in) { - vector<ll> res(sz(in)); - for (ll i = 0; i < sz(in); i++) { - for (ll j = 0; j <= i; j++) { - if ((i | j) == i) { - res[i] += in[j]; - } - } - } - return res; -} - -void stress_test() { - ll tests = 0; - for (ll i = 0; i < 1000; i++) { - int n = Random::integer<int>(1, 100); - auto in = Random::integers<ll>(n, -1000, 1000); - auto got = sos(in); - auto expected = naive(in); - if (got != expected) cerr << "error: " << i << FAIL; - tests += n; - } - cerr << "tested random queries: " << tests << endl; -} - -constexpr int N = 10'000'000; -void performance_test() { - timer t; - auto in = Random::integers<ll>(N, -1000, 1000); - t.start(); - auto res = sos(in); - t.stop(); - hash_t hash = 0; - for (ll x : res) hash += x; - if (t.time > 500) cerr << "too slow: " << t.time << FAIL; - cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; -} - -int main() { - stress_test(); - if (!sanitize) performance_test(); -} - diff --git a/test/string/deBruijn.cpp 
b/test/string/deBruijn.cpp index 09ba611..6bbbad9 100644 --- a/test/string/deBruijn.cpp +++ b/test/string/deBruijn.cpp @@ -5,13 +5,13 @@ bool isDeBruijn(string s, int n, int k) { ll expected = 1; for (ll i = 0; i < n; i++) expected *= k; - if (expected != sz(s)) return false; + if (expected != ssize(s)) return false; s += s; set<string_view> seen; - for (ll i = 0; 2*i < sz(s); i++) { + for (ll i = 0; 2*i < ssize(s); i++) { seen.insert(string_view(s).substr(i, n)); } - return sz(seen) == expected; + return ssize(seen) == expected; } void stress_test() { @@ -21,7 +21,7 @@ void stress_test() { auto [l, r] = Random::pair<char>('b', 'f'); auto got = deBruijn(n, l, r); if (!isDeBruijn(got, n, r - l + 1)) cerr << "error" << FAIL; - queries += sz(got); + queries += ssize(got); } cerr << "tested random queries: " << queries << endl; } @@ -32,7 +32,7 @@ void performance_test() { t.start(); auto res = deBruijn(N, '0', '1'); t.stop(); - hash_t hash = sz(res); + hash_t hash = ssize(res); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/string/duval.cpp b/test/string/duval.cpp index 0475386..88e2fb7 100644 --- a/test/string/duval.cpp +++ b/test/string/duval.cpp @@ -6,8 +6,8 @@ constexpr int N = 20'000'000; bool isLyndon(string_view s) { string t = string(s) + string(s); - for (ll i = 1; i < sz(s); i++) { - if (s >= t.substr(i, sz(s))) return false; + for (ll i = 1; i < ssize(s); i++) { + if (s >= t.substr(i, ssize(s))) return false; } return !s.empty(); } @@ -21,11 +21,11 @@ void stress_test_duval() { if (got.empty()) cerr << "error: a" << FAIL; if (got.front().first != 0) cerr << "error: b" << FAIL; if (got.back().second != n) cerr << "error: c" << FAIL; - for (int j = 1; j < sz(got); j++) { - if (got[j - 1].second != got[j].first) cerr << "error: d" << FAIL; + for (int j = 1; j < ssize(got); j++) { + if (got[j - 1].second != got[j].first) cerr << "error: d" << FAIL; } for (auto [l, r] : got) { - if (!isLyndon(string_view(s).substr(l, r-l))) cerr << "error: e" << FAIL; + if (!isLyndon(string_view(s).substr(l, r-l))) cerr << "error: e" << FAIL; } queries += n; } @@ -45,7 +45,7 @@ void performance_test_duval() { } int naive(string s) { - ll n = sz(s); + ll n = ssize(s); s += s; int res = 0; for (int i = 0; i < n; i++) { diff --git a/test/string/kmp.cpp b/test/string/kmp.cpp index f70a887..8ebeb64 100644 --- a/test/string/kmp.cpp +++ b/test/string/kmp.cpp @@ -2,8 +2,8 @@ #include <string/kmp.cpp> vector<int> naive(string_view s) { - vector<int> res(sz(s) + 1, -1); - for (int i = 0; i < sz(s); i++) { + vector<int> res(ssize(s) + 1, -1); + for (int i = 0; i < ssize(s); i++) { for (int j = 0; j <= i; j++) if (s.substr(0, j) == s.substr(i-j+1, j)) res[i+1] = j; diff --git a/test/string/longestCommonSubsequence.cpp b/test/string/longestCommonSubsequence.cpp index 68ec71b..8c32d61 100644 --- a/test/string/longestCommonSubsequence.cpp +++ b/test/string/longestCommonSubsequence.cpp @@ -4,19 +4,19 @@ bool isSubstr(string_view s, string_view sub) { int i = 0; for (char c : s) { - if (i < sz(sub) && c == sub[i]) i++; + if (i < ssize(sub) && c == sub[i]) i++; } - return i >= sz(sub); + return i >= ssize(sub); } string naive(string_view s, string_view t) { string res = ""; - for (ll i = 1; i < (1ll << sz(s)); i++) { + for (ll i = 1; i < (1ll << ssize(s)); i++) { string tmp; - for (ll j = 0; j < sz(s); j++) { + for (ll j = 0; j < ssize(s); j++) { if (((i >> j) & 1) != 0) tmp.push_back(s[j]); } - if (sz(tmp) >= 
sz(res) && isSubstr(t, tmp)) res = tmp; + if (ssize(tmp) >= ssize(res) && isSubstr(t, tmp)) res = tmp; } return res; } @@ -44,7 +44,7 @@ void performance_test() { t.start(); auto res = lcss(a, b); t.stop(); - hash_t hash = sz(res); + hash_t hash = ssize(res); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/string/lyndon.cpp b/test/string/lyndon.cpp index 905bf8e..154ba66 100644 --- a/test/string/lyndon.cpp +++ b/test/string/lyndon.cpp @@ -3,8 +3,8 @@ bool isLyndon(string_view s) { string t = string(s) + string(s); - for (ll i = 1; i < sz(s); i++) { - if (s >= t.substr(i, sz(s))) return false; + for (ll i = 1; i < ssize(s); i++) { + if (s >= t.substr(i, ssize(s))) return false; } return !s.empty(); } @@ -12,8 +12,8 @@ bool isLyndon(string_view s) { vector<string> naive(ll n, char mi, char ma) { vector<string> res; auto dfs = [&](auto&& self, string pref)->void{ - if (sz(pref) <= n && isLyndon(pref)) res.push_back(pref); - if (sz(pref) >= n) return; + if (ssize(pref) <= n && isLyndon(pref)) res.push_back(pref); + if (ssize(pref) >= n) return; for (char c = mi; c <= ma; c++) { self(self, pref + c); } @@ -39,7 +39,7 @@ void stress_test() { auto got = fast(n, l, r); auto expected = naive(n, l, r); if (got != expected) cerr << "error" << FAIL; - queries += sz(expected); + queries += ssize(expected); } cerr << "tested random queries: " << queries << endl; } @@ -50,7 +50,7 @@ void performance_test() { t.start(); auto res = fast(N, 'a', 'f'); t.stop(); - hash_t hash = sz(res); + hash_t hash = ssize(res); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/string/manacher.cpp b/test/string/manacher.cpp index bde1c89..ada0486 100644 --- a/test/string/manacher.cpp +++ b/test/string/manacher.cpp @@ -2,16 +2,16 @@ #include <string/manacher.cpp> vector<int> naive(string_view s) { - vector<int> res(2 * sz(s) + 1); - for (int i = 0; i < sz(s); i++) { //odd palindromes + vector<int> res(2 * ssize(s) + 1); + for (int i = 0; i < ssize(s); i++) { //odd palindromes int j = 2*i+1; - while (i+res[j] < sz(s) && i-res[j] >= 0 && s[i-res[j]] == s[i+res[j]]) res[j]++; + while (i+res[j] < ssize(s) && i-res[j] >= 0 && s[i-res[j]] == s[i+res[j]]) res[j]++; res[j]*=2; res[j]--; } - for (int i = 0; i <= sz(s); i++) { //even palindromes + for (int i = 0; i <= ssize(s); i++) { //even palindromes int j = 2*i; - while (i+res[j] < sz(s) && i-res[j]-1 >= 0 && s[i-res[j]-1] == s[i+res[j]]) res[j]++; + while (i+res[j] < ssize(s) && i-res[j]-1 >= 0 && s[i-res[j]-1] == s[i+res[j]]) res[j]++; res[j] *= 2; } return res; diff --git a/test/string/rollingHash.cpp b/test/string/rollingHash.cpp index ba8fc40..d19a153 100644 --- a/test/string/rollingHash.cpp +++ b/test/string/rollingHash.cpp @@ -3,7 +3,7 @@ string thueMorse(ll n) { string res = "a"; - while (sz(res) < n) { + while (ssize(res) < n) { string tmp = res; for (char& c : tmp) c ^= 1; res += tmp; @@ -12,7 +12,7 @@ string thueMorse(ll n) { } auto getHash(const string& s) { - return Hash(s)(0, sz(s)); + return Hash(s)(0, ssize(s)); } void testThueMorse() { @@ -20,13 +20,13 @@ void testThueMorse() { set<string> expected; string s = thueMorse(1000); Hash h(s); - for (int l = 0; l < sz(s); l++) { - for (int r = l + 1; r <= sz(s); r++) { + for (int l = 0; l < ssize(s); l++) { + for (int r = l + 1; r <= ssize(s); r++) { got.insert(h(l, r)); expected.insert(s.substr(l, r - 
l)); } } - if (sz(got) != sz(expected)) cerr << "error: thueMorse" << FAIL; + if (ssize(got) != ssize(expected)) cerr << "error: thueMorse" << FAIL; cerr << "thueMorse: ok" << endl; } @@ -43,13 +43,13 @@ void testSmall(int depth) { auto dfs = [&](auto&& self, string pref)->void { expected++; got.insert(getHash(pref)); - if(sz(pref) >= depth) return; + if(ssize(pref) >= depth) return; for (char c = 'a'; c <= 'z'; c++) { self(self, pref + c); } }; dfs(dfs, ""); - if (sz(got) != expected) cerr << "error: small" << FAIL; + if (ssize(got) != expected) cerr << "error: small" << FAIL; cerr << "small: ok" << endl; } @@ -58,13 +58,13 @@ void stress_test() { set<string> expected; string s = Random::string(1000, "abc"); Hash h(s); - for (int l = 0; l < sz(s); l++) { - for (int r = l + 1; r <= sz(s); r++) { + for (int l = 0; l < ssize(s); l++) { + for (int r = l + 1; r <= ssize(s); r++) { got.insert(h(l, r)); expected.insert(s.substr(l, r - l)); } } - if (sz(got) != sz(expected)) cerr << "error: stress test" << FAIL; + if (ssize(got) != ssize(expected)) cerr << "error: stress test" << FAIL; cerr << "stress test: ok" << endl; } diff --git a/test/string/rollingHashCf.cpp b/test/string/rollingHashCf.cpp index 9acce2d..d0f90aa 100644 --- a/test/string/rollingHashCf.cpp +++ b/test/string/rollingHashCf.cpp @@ -5,7 +5,7 @@ constexpr ll RandomQ = 318LL << 53; string thueMorse(ll n) { string res = "a"; - while (sz(res) < n) { + while (ssize(res) < n) { string tmp = res; for (char& c : tmp) c ^= 1; res += tmp; @@ -14,7 +14,7 @@ string thueMorse(ll n) { } auto getHash(const string& s) { - return Hash(s, RandomQ)(0, sz(s)); + return Hash(s, RandomQ)(0, ssize(s)); } void testThueMorse() { @@ -22,13 +22,13 @@ void testThueMorse() { set<string> expected; string s = thueMorse(1000); Hash h(s, RandomQ); - for (int l = 0; l < sz(s); l++) { - for (int r = l + 1; r <= sz(s); r++) { + for (int l = 0; l < ssize(s); l++) { + for (int r = l + 1; r <= ssize(s); r++) { got.insert(h(l, r)); expected.insert(s.substr(l, r - l)); } } - if (sz(got) != sz(expected)) cerr << "error: thueMorse" << FAIL; + if (ssize(got) != ssize(expected)) cerr << "error: thueMorse" << FAIL; cerr << "thueMorse: ok" << endl; } @@ -45,13 +45,13 @@ void testSmall(int depth) { auto dfs = [&](auto&& self, string pref)->void { expected++; got.insert(getHash(pref)); - if(sz(pref) >= depth) return; + if(ssize(pref) >= depth) return; for (char c = 'a'; c <= 'z'; c++) { self(self, pref + c); } }; dfs(dfs, ""); - if (sz(got) != expected) cerr << "error: small" << FAIL; + if (ssize(got) != expected) cerr << "error: small" << FAIL; cerr << "small: ok" << endl; } @@ -60,13 +60,13 @@ void stress_test() { set<string> expected; string s = Random::string(1000, "abc"); Hash h(s, RandomQ); - for (int l = 0; l < sz(s); l++) { - for (int r = l + 1; r <= sz(s); r++) { + for (int l = 0; l < ssize(s); l++) { + for (int r = l + 1; r <= ssize(s); r++) { got.insert(h(l, r)); expected.insert(s.substr(l, r - l)); } } - if (sz(got) != sz(expected)) cerr << "error: stress test" << FAIL; + if (ssize(got) != ssize(expected)) cerr << "error: stress test" << FAIL; cerr << "stress test: ok" << endl; } diff --git a/test/string/suffixArray.cpp b/test/string/suffixArray.cpp index 1314155..37049f6 100644 --- a/test/string/suffixArray.cpp +++ b/test/string/suffixArray.cpp @@ -2,9 +2,9 @@ #include <string/suffixArray.cpp> vector<int> naive(string_view s) { - vector<int> SA(sz(s)); - iota(all(SA), 0); - sort(all(SA), [s](int a, int b){ + vector<int> SA(ssize(s)); + iota(begin(SA), end(SA), 0); 
+ ranges::sort(SA, [s](int a, int b){ return s.substr(a) < s.substr(b); }); return SA; @@ -12,7 +12,7 @@ vector<int> naive(string_view s) { int lcp(string_view s, int x, int y) { int res = 0; - while (x + res < sz(s) && y + res < sz(s) && s[x + res] == s[y + res]) res++; + while (x + res < ssize(s) && y + res < ssize(s) && s[x + res] == s[y + res]) res++; return res; } @@ -50,7 +50,7 @@ void performance_test() { SuffixArray sa(s); t.stop(); hash_t hash = 0; - for (int i = 0; i < sz(sa.SA); i++) hash += i*sa.SA[i]; + for (int i = 0; i < ssize(sa.SA); i++) hash += i*sa.SA[i]; if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/string/suffixAutomaton.cpp b/test/string/suffixAutomaton.cpp index 146ae11..dacbb83 100644 --- a/test/string/suffixAutomaton.cpp +++ b/test/string/suffixAutomaton.cpp @@ -4,10 +4,10 @@ pair<int, int> naive(string_view s, string_view t) { int pos = 0; int len = 0; - for (int j = 0; j < sz(t); j++) { - for (int i = 0; i < sz(s); i++) { + for (int j = 0; j < ssize(t); j++) { + for (int i = 0; i < ssize(s); i++) { int cur = 0; - while (i+cur < sz(s) && j+cur < sz(t) && s[i+cur] == t[j+cur]) cur++; + while (i+cur < ssize(s) && j+cur < ssize(t) && s[i+cur] == t[j+cur]) cur++; if (cur > len) { pos = j; len = cur; @@ -43,7 +43,7 @@ void performance_test() { SuffixAutomaton sa(s); t.stop(); hash_t hash = 0; - for (ll c = 0; c < sz(s);) { + for (ll c = 0; c < ssize(s);) { int m = Random::integer<int>(1, 1000); s = Random::string(m, "abc"); t.start(); diff --git a/test/string/suffixTree.cpp b/test/string/suffixTree.cpp index 9181c2e..6f3d912 100644 --- a/test/string/suffixTree.cpp +++ b/test/string/suffixTree.cpp @@ -2,8 +2,8 @@ #include <string/suffixTree.cpp> vector<string> naive(string_view s) { - vector<string> res(sz(s)); - for (ll i = 0; i < sz(s); i++) { + vector<string> res(ssize(s)); + for (ll i = 0; i < ssize(s); i++) { res[i] = s.substr(i); } return res; @@ -19,7 +19,7 @@ void stress_test() { auto dfs = [&](auto&& self, string pref, ll node) -> void { auto& [l, r, _, next] = st.tree[node]; if (l >= 0) pref += s.substr(l, r - l); - if (pref.back() == '#') got[n + 1 - sz(pref)] = pref; + if (pref.back() == '#') got[n + 1 - ssize(pref)] = pref; for (auto [__, j] : next) { self(self, pref, j); } @@ -39,7 +39,7 @@ void performance_test() { t.start(); SuffixTree st(s); t.stop(); - hash_t hash = sz(st.tree); + hash_t hash = ssize(st.tree); if (t.time > 500) cerr << "too slow: " << t.time << FAIL; cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; } diff --git a/test/string/z.cpp b/test/string/z.cpp index f190482..a11984a 100644 --- a/test/string/z.cpp +++ b/test/string/z.cpp @@ -2,9 +2,9 @@ #include <string/z.cpp> vector<int> naive(const string& s) { - vector<int> res(sz(s)); - for (int i = 1; i < sz(s); i++) { - while (i + res[i] < sz(s) && s[res[i]] == s[i + res[i]]) res[i]++; + vector<int> res(ssize(s)); + for (int i = 1; i < ssize(s); i++) { + while (i + res[i] < ssize(s) && s[res[i]] == s[i + res[i]]) res[i]++; } return res; } diff --git a/test/test.sh b/test/test.sh deleted file mode 100755 index a3e6ea9..0000000 --- a/test/test.sh +++ /dev/null @@ -1,131 +0,0 @@ -#!/bin/bash -set -e -cd "$(dirname "$0")" -ulimit -s 4000000 -export MALLOC_PERTURB_="$((2#01011001))" -shopt -s lastpipe -export UBSAN_OPTIONS=halt_on_error=1:print_stacktrace=1 - -declare -A cppstandard -cppstandard["string/suffixArray.cpp"]="gnu++20" 
-cppstandard["other/pbs.cpp"]="gnu++20" -seedmacro="" -compilerflags="-O2" -debugflags="-O2 -fsanitize=address,undefined" - -process_awk() { - awk_file=$(realpath --relative-to="${PWD}" "${1}") - cpp_file=${awk_file%.awk} - folder=$(dirname $awk_file) - #echo "$awk_file" - mkdir -p "./awk/$folder" - awk -f "$awk_file" < "../content/$cpp_file" > "./awk/$cpp_file" -} - -test_file() { - file=$(realpath --relative-to="${PWD}" "${1}") - echo "$file:" - - echo "compiling with sanitizer..." - std="gnu++17" - if [[ -v cppstandard[$file] ]]; then - std=${cppstandard[$file]} - fi - g++ -std=$std "$file" -I ./awk/ -I ../content/ $debugflags -Wall -Wextra -Wshadow -Werror -DSANITIZE $seedmacro - echo "running with sanitizer..." - timeout --foreground 90s ./a.out - rm ./a.out - - echo "compiling -O2..." - std="gnu++17" - if [[ -v cppstandard[$file] ]]; then - std=${cppstandard[$file]} - fi - g++ -std=$std "$file" -I ./awk/ -I ../content/ $compilerflags -Wall -Wextra -Wshadow -Werror $seedmacro - echo "running -O2..." - timeout --foreground 60s ./a.out - echo "" - rm ./a.out -} - -list_missing() { - declare -A ignore - ignore["other/bitOps.cpp"]=1 - ignore["other/pragmas.cpp"]=1 - ignore["other/stuff.cpp"]=1 - ignore["other/timed.cpp"]=1 - ignore["tests/gcc5bug.cpp"]=1 - ignore["tests/precision.cpp"]=1 - ignore["tests/whitespace.cpp"]=1 - - total=0 - missing=0 - - if [[ ! -v $1 ]]; then - echo "missing tests:" - fi - find ../content/ -type f -name '*.cpp' -print0 | sort -z | while read -d $'\0' file - do - total=$((total+1)) - file=${file#../content/} - if [ ! -f "$file" ] && [[ ! -v ignore["$file"] ]]; then - missing=$((missing+1)) - if [[ ! -v $1 ]]; then - echo " $file" - fi - fi - done - if [[ -v $1 ]]; then - covered=$((total-missing)) - coverage=$((100*covered/total)) - echo "REQUIRED=$(( total < 4 ? 0 : total - 4 ))" - echo "TOTAL=$total" - echo "COVERED=$covered" - echo "MISSING=$missing" - fi -} - -coverage() { - list_missing 1 -} - -rm -rf ./awk/ -find . -type f -path '*.awk' -print0 | sort -z | while read -d $'\0' file -do - process_awk "$file" -done - -if [ "$#" -ne 0 ]; then - for arg in "$@" - do - if [[ $arg == "--awk" ]]; then - echo "processed all awk files" - elif [[ $arg == "--missing" ]]; then - list_missing - elif [[ $arg == "--coverage" ]]; then - coverage - elif [[ $arg == --seed=* ]]; then - seedmacro="-DSEED=${arg:7}ll" - elif [[ $arg == "--debug" ]]; then - debugflags="-g -fsanitize=address,undefined" - elif [ -d "$arg" ]; then - dir=$(realpath --relative-to="${PWD}" "$arg") - find . -type f -path "./${dir}/*.cpp" -not -path './awk/*' -print0 | sort -z | while read -d $'\0' file - do - test_file "$file" - done - elif [ -f "$arg" ]; then - test_file "$arg" - else - echo "did not recognize: $arg" - exit 1 - fi - done -else - find . 
-type f -path '*.cpp' -not -path './awk/*' -print0 | sort -z | while read -d $'\0' file - do - test_file "$file" - done - list_missing -fi - diff --git a/test/util.h b/test/util.h index 8de393d..880ff04 100644 --- a/test/util.h +++ b/test/util.h @@ -1,16 +1,12 @@ #include <bits/stdc++.h> using namespace std; -#define all(x) std::begin(x), std::end(x) -#define sz(x) (ll)std::size(x) - #ifdef SANITIZE constexpr bool sanitize = true; #else constexpr bool sanitize = false; #endif - using ll = long long; using lll = __int128; using ld = long double; @@ -20,6 +16,16 @@ namespace INT {constexpr int INF = 0x3FFF'FFFF;} namespace LL {constexpr ll INF = 0x3FFF'FFFF'FFFF'FFFFll;} namespace LD {constexpr ld INF = numeric_limits<ld>::infinity();} +#ifdef SANITIZE +template<typename T> +T _lg_check(T n) { + assert(n > 0); + return __lg(n); +} + +#define __lg _lg_check +#endif + namespace details { template<typename T = ll> bool isPrime(T x) { @@ -116,7 +122,7 @@ namespace Random { std::string string(std::size_t n, string_view chars) { std::string res(n, '*'); - for (char& c : res) c = chars[integer(sz(chars))]; + for (char& c : res) c = chars[integer(ssize(chars))]; return res; } @@ -175,6 +181,30 @@ namespace Random { exit(1); } +namespace detail { + double benchmark() { + mt19937 rng(734820734); + vector<unsigned> a(10000000); + for (unsigned &x: a) x = rng(); + chrono::steady_clock::time_point start = chrono::steady_clock::now(); + vector<unsigned> dp(ssize(a)+1, numeric_limits<unsigned>::max()); + int res = 0; + for (unsigned x: a) { + auto it = ranges::upper_bound(dp, x); + res = max(res, (int)(it - begin(dp))); + *it = x; + } + chrono::steady_clock::time_point end = chrono::steady_clock::now(); + assert(res == 6301); + double t = + chrono::duration_cast<chrono::duration<double, milli>>(end - start) + .count(); + return 30/t; + } + + double speed = benchmark(); +} + struct timer { bool running = false; double time = 0; @@ -190,7 +220,7 @@ struct timer { auto end = chrono::steady_clock::now(); if (!running) cerr << "timer not running!" 
<< FAIL; running = false; - time += chrono::duration_cast<chrono::duration<double, milli>>(end - begin).count(); + time += chrono::duration_cast<chrono::duration<double, milli>>(end - begin).count() * detail::speed; } void reset() { @@ -215,7 +245,7 @@ namespace c20 { return {{a[I]...}}; } } - + template<class T, std::size_t N> constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) { return c20::detail::to_array_impl(a, std::make_index_sequence<N>{}); @@ -264,9 +294,9 @@ public: Graph(int n) : adj(n) {} - int m() const {return sz(edges);} - int n() const {return sz(adj);} - int deg(int x) const {return sz(adj[x]);} + int m() const {return ssize(edges);} + int n() const {return ssize(adj);} + int deg(int x) const {return ssize(adj[x]);} Graph& clear() { adj.assign(adj.size(), {}); @@ -278,33 +308,33 @@ public: if (!LOOPS && from == to) return false; if (!MULTI && adj[from].find(to) != adj[from].end()) return false; edges.emplace_back(from, to, w); - _addAdj(sz(edges) - 1); + _addAdj(ssize(edges) - 1); return true; } Graph& reverse() { for (auto& e : edges) swap(e.from, e.to); adj.assign(adj.size(), {}); - for (int i = 0; i < sz(edges); i++) _addAdj(i); + for (int i = 0; i < ssize(edges); i++) _addAdj(i); return *this; } Graph& shuffle() { - std::shuffle(all(edges), Random::rng); + ranges::shuffle(edges, Random::rng); if constexpr (!DIR) { for (auto& e : edges) { if (Random::integer(0, 2)) swap(e.from, e.to); } } adj.assign(adj.size(), {}); - for (int i = 0; i < sz(edges); i++) _addAdj(i); + for (int i = 0; i < ssize(edges); i++) _addAdj(i); return *this; } Graph& permutate() { vector<int> perm(n()); - iota(all(perm), 0); - std::shuffle(all(perm), Random::rng); + iota(begin(perm), end(perm), 0); + ranges::shuffle(perm, Random::rng); for (auto& e : edges) { e.from = perm[e.from]; e.to = perm[e.to]; @@ -382,7 +412,7 @@ public: } } } - std::shuffle(all(tmp), Random::rng); + ranges::shuffle(tmp, Random::rng); for (auto [a, b] : tmp) { if (todo <= 0) break; if (addEdge(a, b)) todo--; @@ -420,3 +450,10 @@ ld float_error(ld given, ld expected) { } return numeric_limits<ld>::infinity(); } + +#include <ext/pb_ds/assoc_container.hpp> +template<typename T> +using Tree = __gnu_pbds::tree<T, __gnu_pbds::null_type, less<T>, + __gnu_pbds::rb_tree_tag, + __gnu_pbds::tree_order_statistics_node_update>; + |
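Reference note (not part of the patch): most hunks above mechanically replace the sz(x)/all(x) helpers that this patch removes from test/util.h with C++20 std::ssize and std::ranges algorithms, and switch next_permutation call sites to the ranges version, whose result exposes a .found member instead of returning bool. A minimal self-contained sketch of the pattern, assuming -std=gnu++20:

#include <bits/stdc++.h>
using namespace std;

int main() {
    vector<int> v = {3, 1, 2};
    // before: sort(all(v));  with the old  #define all(x) std::begin(x), std::end(x)
    ranges::sort(v);
    // before: for (i = 0; i < sz(v); ...)  with the old  #define sz(x) (ll)std::size(x)
    for (int i = 0; i < ssize(v); i++) cout << v[i] << ' ';  // ssize returns a signed size
    cout << '\n';
    // before: next_permutation(all(v)) returned bool; the ranges version returns a
    // result struct, hence the ".found" now used in the kthperm/permIndex tests
    if (ranges::next_permutation(v).found) cout << "not yet the last permutation\n";
}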

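Reference note (illustrative only): the per-test includes of datastructures/pbds.cpp are dropped because test/util.h now ends with the __gnu_pbds Tree alias added above; a hypothetical order-statistics usage inside a test would look like:

#include "../util.h"  // brings in the Tree alias introduced at the end of this patch

int main() {
    Tree<int> t;
    for (int x : {5, 1, 9}) t.insert(x);
    // order_of_key / find_by_order are provided by tree_order_statistics_node_update
    assert(t.order_of_key(5) == 1);    // one key (1) is strictly smaller than 5
    assert(*t.find_by_order(2) == 9);  // 0-indexed: third smallest element
    cerr << "pbds tree: ok" << endl;
}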