diff options
65 files changed, 1256 insertions, 332 deletions
@@ -220,3 +220,9 @@ TSWLatexianTemp* *-tags.tex *~ + +# files produced by the testing system +*.test +*.ok +*.tmp.cpp +*.d @@ -1,5 +1,28 @@ -all: - latexmk -pdf tcr -clean: - latexmk -c tcr + +LATEXMK = latexmk -interaction=nonstopmode + +tcr.pdf: FORCE + $(LATEXMK) -pdf tcr + +pdf: tcr.pdf tcr-opt.pdf + +tcr-opt.pdf: FORCE + $(LATEXMK) -pdf -jobname=tcr-opt -usepretex="\def\OPTIONAL{}" tcr + +all: pdf test + +test: + +gmake -f TestMakefile + +clean: cleanpdf cleantest + +cleanpdf: + $(LATEXMK) -C tcr + $(LATEXMK) -C -jobname=tcr-opt tcr rm -f *.thm + +cleantest: + +-gmake -f TestMakefile cleantest + +FORCE: +.PHONY: all pdf test clean cleanpdf cleantest FORCE diff --git a/README.md b/README.md deleted file mode 100644 index 7a518c5..0000000 --- a/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Team Contest Reference
-a modified version of the team contest reference for team ChaosKITs from Karlsruhe, Germany.
-https://github.com/pjungeblut/ChaosKITs
diff --git a/TestMakefile b/TestMakefile new file mode 100644 index 0000000..58d2678 --- /dev/null +++ b/TestMakefile @@ -0,0 +1,61 @@ +with-defines-subsets = $(if $2,$(call with-defines-subsets,$1,$(call dropfirst,$2)) $(call with-defines-subsets,$1.$(firstword $2),$(call dropfirst,$2)),$1) +dropfirst = $(wordlist 2,$(words $1),$1) + +source = $(dir $*)$(firstword $(subst ., ,$(notdir $*))) +flags = $(call dropfirst,$(subst ., -D,$(notdir $*))) $(CPPFLAGS) +predep = $(patsubst $(source)=%,%,$(filter $(source)=%,$(PREDEPS))) + +TESTS := \ + $(call with-defines-subsets,datastructures/test/segmentTree,SEGTREE_MUL SEGTREE_INIT_DEFAULT SEGTREE_RANGE_UPDATE) \ + $(call with-defines-subsets,datastructures/test/segmentTree.SEGTREE_FIRST_NEG,SEGTREE_INIT_DEFAULT) \ + $(call with-defines-subsets,datastructures/test/lazyPropagation,SEGTREE_FIRST_NEG SEGTREE_INIT_DEFAULT) \ + $(call with-defines-subsets,datastructures/test/lazyPropagation.SEGTREE_MAX,SEGTREE_INIT_DEFAULT) \ + datastructures/test/waveletTree \ + datastructures/test/fenwickTree \ + datastructures/test/fenwickTree2 \ + datastructures/test/treap2 \ + datastructures/test/sparseTable \ + datastructures/test/sparseTableDisjoint \ + datastructures/test/monotonicConvexHull \ + datastructures/test/persistent \ + graph/test/binary_lifting \ + graph/test/LCA_sparse \ + math/test/binomial0 + +# Dependencies which must be present before generating the .d file. 
+PREDEPS := \ + datastructures/test/segmentTree=datastructures/test/segmentTree.tmp.cpp \ + datastructures/test/lazyPropagation=datastructures/test/lazyPropagation.tmp.cpp + +CPPFLAGS := -include test.h -std=gnu++20 -Wall -Wextra \ + -Werror -fsanitize=address,undefined -fno-sanitize-recover -g + +test: $(TESTS:=.ok) + +cleantest: + rm -f $(TESTS:=.test) $(TESTS:=.ok) $(TESTS:=.d) \ + datastructures/test/segmentTree.tmp.cpp + +%.ok: %.test + timeout -v 2 ./$< + @touch $@ + +.SECONDEXPANSION: +%.test: $$(source).cpp + g++ $(flags) -o $@ $< + +%.d: $$(source).cpp $$(predep) + g++ -MM -MT '$*.test $*.d' -MF '$@' $(flags) $< + +datastructures/test/segmentTree.tmp.cpp: datastructures/segmentTree.cpp \ + datastructures/test/segmentTree.awk + awk -f datastructures/test/segmentTree.awk $< > $@ +datastructures/test/lazyPropagation.tmp.cpp: \ + datastructures/lazyPropagation.cpp datastructures/test/lazyPropagation.awk + awk -f datastructures/test/lazyPropagation.awk $< > $@ + +.PHONY: test cleantest +.SECONDARY: $(TESTS:=.test) + +include $(TESTS:=.d) + diff --git a/datastructures/RMQ.cpp b/datastructures/RMQ.cpp deleted file mode 100644 index 401cca4..0000000 --- a/datastructures/RMQ.cpp +++ /dev/null @@ -1,27 +0,0 @@ -vector<int> values; -vector<vector<int>> rmq; - -int select(int a, int b) { - return values[a] <= values[b] ? 
a : b; -} - -void build() { - for(int i = 0, s = 1, ss = 1; s <= sz(values); ss=s, s*=2, i++) { - for(int l = 0; l + s <= sz(values); l++) { - if(i == 0) rmq[0][l] = l; - else { - int r = l + ss; - rmq[i][l] = select(rmq[i-1][l], rmq[i-1][r]]); -}}}} - -void init(const vector<int>& v) { - values = v; - rmq = vector<vector<int>>(__lg(sz(values))+1, vector<int>(sz(values))); - build(); -} - -int query(int l, int r) { - if(l >= r) return l; - int s = __lg(r-l); r = r - (1 << s); - return select(rmq[s][l],rmq[s][r]); -} diff --git a/datastructures/datastructures.tex b/datastructures/datastructures.tex index d35dfb0..43d6dd8 100644 --- a/datastructures/datastructures.tex +++ b/datastructures/datastructures.tex @@ -3,23 +3,25 @@ \begin{algorithm}{Segmentbaum} \begin{methods} \method{SegTree}{baut den Baum auf}{n} - \method{query}{findet Summe über [l, r)}{\log(n)} + \method{query}{findet Summe über $[l, r)$}{\log(n)} \method{update}{ändert einen Wert}{\log(n)} \end{methods} \sourcecode{datastructures/segmentTree.cpp} \subsubsection{Lazy Propagation} Assignment modifications, sum queries \\ - \method{lower\_bound}{erster Index in [l, r) $\geq$ x (erfordert max-combine)}{\log(n)} + \method{lower\_bound}{erster Index in $[l, r) \geq x$ (erfordert max-combine)}{\log(n)} \sourcecode{datastructures/lazyPropagation.cpp} \end{algorithm} \begin{algorithm}{Wavelet Tree} \begin{methods} - \method{Constructor}{baut den Baum auf}{n\*\log(n)} - \method{kth}{sort $[l, r)[k]$}{\log(n)} - \method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(n)} + \method{WaveletTree}{baut den Baum auf}{n\*\log(A)} + \method{kth}{sort $[l, r)[k]$}{\log(A)} + \method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(A)} \end{methods} + $A$ ist die Gr\"o\ss e des Eingabebereichs, d.h. + $\mathit{max} - \mathit{min}$. 
\sourcecode{datastructures/waveletTree.cpp} \end{algorithm} \columnbreak @@ -27,15 +29,15 @@ \begin{algorithm}{Fenwick Tree} \begin{methods} \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von [0, i]}{\log(n)} + \method{prefix\_sum}{summe von $[0, i)$}{\log(n)} \method{update}{addiert ein Delta zu einem Element}{\log(n)} \end{methods} \sourcecode{datastructures/fenwickTree.cpp} \begin{methods} \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von [0, i]}{\log(n)} - \method{update}{addiert ein Delta zu allen Elementen [l, r)}{\log(n)} + \method{prefix\_sum}{summe von $[0, i)$}{\log(n)} + \method{update}{addiert ein Delta zu allen Elementen $[l, r)$}{\log(n)} \end{methods} \sourcecode{datastructures/fenwickTree2.cpp} \end{algorithm} @@ -47,7 +49,7 @@ \begin{algorithm}{(Implicit) Treap (Cartesian Tree)} \begin{methods} - \method{insert}{fügt wert $\mathit{val}$ an stelle $i$ ein (verschiebt alle Positionen >= $i$)}{\log(n)} + \method{insert}{fügt wert $\mathit{val}$ an stelle $i$ ein (verschiebt alle Positionen $\geq i$)}{\log(n)} \method{remove}{löscht werte $[i,i+\mathit{count})$}{\log(n)} \end{methods} \sourcecode{datastructures/treap2.cpp} @@ -56,7 +58,7 @@ \begin{algorithm}{Range Minimum Query} \begin{methods} \method{init}{baut Struktur auf}{n\*\log(n)} - \method{queryIdempotent}{Index des Minimums in [l, r)}{1} + \method{queryIdempotent}{Index des Minimums in $[l, r)$}{1} \end{methods} \begin{itemize} \item \code{better}-Funktion muss idempotent sein! 
@@ -64,13 +66,21 @@ \sourcecode{datastructures/sparseTable.cpp} \end{algorithm} +\begin{algorithm}[optional]{Range Aggregate Query} + \begin{methods} + \method{init}{baut Struktur auf}{n\*\log(n)} + \method{query}{Aggregat über $[l,r)$}{1} + \end{methods} + \sourcecode{datastructures/sparseTableDisjoint.cpp} +\end{algorithm} + \begin{algorithm}{STL-Bitset} \sourcecode{datastructures/bitset.cpp} \end{algorithm} \begin{algorithm}{Link-Cut-Tree} \begin{methods} - \method{Constructor}{baut Wald auf}{n} + \method{LCT}{baut Wald auf}{n} \method{connected}{prüft ob zwei Knoten im selben Baum liegen}{\log(n)} \method{link}{fügt $\{x,y\}$ Kante ein}{\log(n)} \method{cut}{entfernt $\{x,y\}$ Kante}{\log(n)} @@ -80,23 +90,35 @@ \end{methods} \sourcecode{datastructures/LCT.cpp} \end{algorithm} -\clearpage - -\begin{algorithm}{Lichao} - \sourcecode{datastructures/lichao.cpp} -\end{algorithm} +\columnbreak \begin{algorithm}{Policy Based Data Structures} - \textbf{Wichtig:} Verwende \code{p.swap(p2)} anstatt \code{swap(p, p2)}! - \sourcecode{datastructures/stlPriorityQueue.cpp} - \columnbreak \sourcecode{datastructures/pbds.cpp} \end{algorithm} -\begin{algorithm}{Lower/Upper Envelope (Convex Hull Optimization)} - Um aus einem lower envelope einen upper envelope zu machen (oder umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. +\begin{algorithm}{Lower Envelope (Convex Hull Optimization)} + Um aus einem Lower Envelope einen Upper Envelope zu machen (oder + umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. 
+ \subsubsection{Monotonic} + \begin{methods} + \method{add}{add line $mx + b$, $m$ is decreasing}{1} + \method{query}{minimum value at $x$, $x$ is increasing}{1} + \end{methods} \sourcecode{datastructures/monotonicConvexHull.cpp} + \subsubsection{Dynamic} + \begin{methods} + \method{add}{add line $mx + b$}{\log(n)} + \method{query}{minimum value at $x$}{\log(n)} + \end{methods} \sourcecode{datastructures/dynamicConvexHull.cpp} + \subsubsection{Li Chao Tree} + Every pair of functions has at most one intersection. + + \begin{methods} + \method{insert}{add function}{\log(|xs|)} + \method{query}{minimum value at $x$, $x \in xs$}{\log(|xs|)} + \end{methods} + \sourcecode{datastructures/lichao.cpp} \end{algorithm} \begin{algorithm}{Union-Find} @@ -108,6 +130,17 @@ \end{methods} \sourcecode{datastructures/unionFind.cpp} \end{algorithm} + +\begin{algorithm}[optional]{Union-Find with size} + \begin{methods} + \method{init}{legt $n$ einzelne Unions an}{n} + \method{findSet}{findet den Repräsentanten}{\log(n)} + \method{unionSets}{vereint 2 Mengen}{\log(n)} + \method{getSize}{zählt Elemente in Menge, die $i$ enthält}{\log(n)} + \method{m\*findSet + n\*unionSets}{Folge von Befehlen}{n+m\*\alpha(n)} + \end{methods} + \sourcecode{datastructures/unionFind2.cpp} +\end{algorithm} \columnbreak \begin{algorithm}{Persistent} @@ -120,14 +153,6 @@ \sourcecode{datastructures/persistentArray.cpp} \end{algorithm} -\begin{algorithm}[optional]{Range Minimum Query} - \begin{methods} - \method{init}{baut Struktur auf}{n\*\log(n)} - \method{query}{Index des Minimums in [l, r)}{1} - \end{methods} - \sourcecode{datastructures/RMQ.cpp} -\end{algorithm} - \begin{algorithm}[optional]{Erste unbenutzte natürliche Zahl} \begin{methods} \method{get\_first\_unused}{findet kleinste unbenutzte Zahl}{\log(n)} diff --git a/datastructures/dynamicConvexHull.cpp b/datastructures/dynamicConvexHull.cpp index d669847..2bd67a6 100644 --- a/datastructures/dynamicConvexHull.cpp +++ 
b/datastructures/dynamicConvexHull.cpp @@ -19,13 +19,11 @@ struct HullDynamic : multiset<Line, less<>> { auto x = insert({m, b, 0}); while (isect(x, next(x))) erase(next(x)); if (x != begin()) { - x--; - if (isect(x, next(x))) { - erase(next(x)); - isect(x, next(x)); - }} + --x; + while (isect(x, next(x))) erase(next(x)); + } while (x != begin() && prev(x)->p >= x->p) { - x--; + --x; isect(x, erase(next(x))); }} diff --git a/datastructures/fenwickTree.cpp b/datastructures/fenwickTree.cpp index cac3cf8..7013613 100644 --- a/datastructures/fenwickTree.cpp +++ b/datastructures/fenwickTree.cpp @@ -1,15 +1,15 @@ vector<ll> tree; void update(int i, ll val) { - for (i++; i < sz(tree); i += (i & (-i))) tree[i] += val; + for (i++; i < ssize(tree); i += i & -i) tree[i] += val; } void init(int n) { - tree.assign(n + 1,0); + tree.assign(n + 1, 0); } ll prefix_sum(int i) { ll sum = 0; - for (i++; i > 0; i -= (i & (-i))) sum += tree[i]; + for (; i > 0; i &= i-1) sum += tree[i]; return sum; } diff --git a/datastructures/fenwickTree2.cpp b/datastructures/fenwickTree2.cpp index ff87e2e..7fcdbb9 100644 --- a/datastructures/fenwickTree2.cpp +++ b/datastructures/fenwickTree2.cpp @@ -1,21 +1,21 @@ vector<ll> add, mul; void update(int l, int r, ll val) { - for (int tl = l + 1; tl < sz(add); tl += tl&(-tl)) + for (int tl = l + 1; tl < ssize(add); tl += tl & -tl) add[tl] += val, mul[tl] -= val * l; - for (int tr = r + 1; tr < sz(add); tr += tr&(-tr)) + for (int tr = r + 1; tr < ssize(add); tr += tr & -tr) add[tr] -= val, mul[tr] += val * r; } -void init(vector<ll>& v) { - mul.assign(sz(v) + 1,0); - add.assign(sz(v) + 1,0); - for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]); +void init(vector<ll> &v) { + mul.assign(size(v) + 1, 0); + add.assign(size(v) + 1, 0); + for(int i = 0; i < ssize(v); i++) update(i, i + 1, v[i]); } -ll prefix_sum (int i) { - ll res = 0; i++; - for (int ti = i; ti > 0; ti -= ti&(-ti)) +ll prefix_sum(int i) { + ll res = 0; + for (int ti = i; ti > 0; ti &= ti-1) 
res += add[ti] * i + mul[ti]; return res; } diff --git a/datastructures/lazyPropagation.cpp b/datastructures/lazyPropagation.cpp index 0fe7bbe..2202a6f 100644 --- a/datastructures/lazyPropagation.cpp +++ b/datastructures/lazyPropagation.cpp @@ -1,21 +1,22 @@ struct SegTree { using T = ll; using U = ll; - int n, h; static constexpr T E = 0; // Neutral element for combine static constexpr U UF = 0; // Unused value by updates + int n; vector<T> tree; vector<U> lazy; + int h; vector<ll> k; // size of segments (optional) - SegTree(const vector<T>& a) : n(sz(a) + 1), tree(2 * n, E), + SegTree(const vector<T>& a) : n(ssize(a) + 1), tree(2 * n), //SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def), - h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) { - copy(all(a), tree.begin() + n); + lazy(n, UF), h(__lg(2 * n)), k(2 * n, 1) { + ranges::copy(a, tree.begin() + n); for (int i = n - 1; i > 0; i--) { k[i] = 2 * k[2 * i]; tree[i] = comb(tree[2 * i], tree[2 * i + 1]); }} - T comb(T a, T b) {return a + b;} // Modify this + E + T comb(T a, T b) { return a + b; } // Modify this + E void apply(int i, U val) { // And this + UF tree[i] = val * k[i]; @@ -42,17 +43,17 @@ struct SegTree { void update(int l, int r, U val) { l += n, r += n; int l0 = l, r0 = r; - push(l0), push(r0 - 1); + push(l0), push(r0); for (; l < r; l /= 2, r /= 2) { if (l&1) apply(l++, val); if (r&1) apply(--r, val); } - build(l0), build(r0 - 1); + build(l0), build(r0); } T query(int l, int r) { l += n, r += n; - push(l), push(r - 1); + push(l), push(r); T resL = E, resR = E; for (; l < r; l /= 2, r /= 2) { if (l&1) resL = comb(resL, tree[l++]); diff --git a/datastructures/monotonicConvexHull.cpp b/datastructures/monotonicConvexHull.cpp index 44bff83..e7b9b3e 100644 --- a/datastructures/monotonicConvexHull.cpp +++ b/datastructures/monotonicConvexHull.cpp @@ -1,27 +1,26 @@ -// Lower Envelope mit MONOTONEN Inserts und Queries. Jede neue -// Gerade hat kleinere Steigung als alle vorherigen. 
-struct Line { - ll m, b; - ll operator()(ll x) {return m*x+b;} -}; +struct Envelope { + struct Line { + ll m, b; + ll operator()(ll x) { return m*x+b; } + }; -vector<Line> ls; -int ptr = 0; + vector<Line> ls; + int ptr = 0; -bool bad(Line l1, Line l2, Line l3) { - return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m); -} + static bool bad(Line l1, Line l2, Line l3) { + return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m); + } -void add(ll m, ll b) { // Laufzeit O(1) amortisiert - while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, b})) { - ls.pop_back(); + void add(ll m, ll b) { + while (sz(ls) > 1 && bad(ls.end()[-2], ls.back(), {m, b})) { + ls.pop_back(); + } + ls.push_back({m, b}); + ptr = min(ptr, sz(ls) - 1); } - ls.push_back({m, b}); - ptr = min(ptr, sz(ls) - 1); -} -ll query(ll x) { // Laufzeit: O(1) amortisiert - ptr = min(ptr, sz(ls) - 1); - while (ptr < sz(ls)-1 && ls[ptr + 1](x) < ls[ptr](x)) ptr++; - return ls[ptr](x); -}
\ No newline at end of file + ll query(ll x) { + while (ptr < sz(ls)-1 && ls[ptr + 1](x) < ls[ptr](x)) ptr++; + return ls[ptr](x); + } +}; diff --git a/datastructures/pbds.cpp b/datastructures/pbds.cpp index c2b44cc..a0e5383 100644 --- a/datastructures/pbds.cpp +++ b/datastructures/pbds.cpp @@ -1,10 +1,19 @@ +#include <ext/pb_ds/priority_queue.hpp> +template<typename T> +using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>> +auto it = pq.push(5); // O(1) +pq.modify(it, 6); // O(log n) +pq.erase(it); // O(log n) +pq.join(pq2); // O(1) +pq.swap(pq2); // O(1) + #include <ext/pb_ds/assoc_container.hpp> using namespace __gnu_pbds; template<typename T> using Tree = tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>; -// T.order_of_key(x): number of elements strictly less than x -// *T.find_by_order(k): k-th element +T.order_of_key(x); // number of elements strictly less than x +auto it = T.find_by_order(k); // k-th element template<typename T> struct chash { diff --git a/datastructures/persistent.cpp b/datastructures/persistent.cpp index 0a65a79..4093cdc 100644 --- a/datastructures/persistent.cpp +++ b/datastructures/persistent.cpp @@ -7,7 +7,7 @@ struct persistent { : time(time), data(1, {time, value}) {}
T get(int t) {
- return prev(upper_bound(all(data), {t+1, {}}))->second;
+ return prev(upper_bound(all(data), pair{t+1, T{}}))->second;
}
int set(T value) {
diff --git a/datastructures/segmentTree.cpp b/datastructures/segmentTree.cpp index 79c6cae..1fbf886 100644 --- a/datastructures/segmentTree.cpp +++ b/datastructures/segmentTree.cpp @@ -4,14 +4,15 @@ struct SegTree { vector<T> tree; static constexpr T E = 0; // Neutral element for combine - SegTree(vector<T>& a) : n(sz(a)), tree(2 * n) { - //SegTree(int size, T val = E) : n(size), tree(2 * n, val) { - copy(all(a), tree.begin() + n); + SegTree(vector<T>& a) : n(ssize(a)), tree(2 * n, E) { + ranges::copy(a, tree.begin() + n); + //SegTree(int size, T val = E) : n(size), tree(2 * n, E) { + // fill(tree.begin() + n, tree.end(), val); for (int i = n - 1; i > 0; i--) { // remove for range update tree[i] = comb(tree[2 * i], tree[2 * i + 1]); }} - ll comb(T a, T b) {return a + b;} // modify this + neutral + T comb(T a, T b) { return a + b; } // modify this + neutral void update(int i, T val) { tree[i += n] = val; // apply update code diff --git a/datastructures/sparseTable.cpp b/datastructures/sparseTable.cpp index 63cce48..64a892a 100644 --- a/datastructures/sparseTable.cpp +++ b/datastructures/sparseTable.cpp @@ -6,9 +6,9 @@ struct SparseTable { return a[lidx] <= a[ridx] ? 
lidx : ridx; } - void init(vector<ll> *vec) { - int n = sz(*vec); - a = vec->data(); + void init(vector<ll> &vec) { + int n = sz(vec); + a = vec.data(); st.assign(__lg(n) + 1, vector<int>(n)); iota(all(st[0]), 0); for (int j = 0; (2 << j) <= n; j++) { diff --git a/datastructures/sparseTableDisjoint.cpp b/datastructures/sparseTableDisjoint.cpp index 31e9025..dc5297a 100644 --- a/datastructures/sparseTableDisjoint.cpp +++ b/datastructures/sparseTableDisjoint.cpp @@ -1,5 +1,5 @@ struct DisjointST { - static constexpr ll neutral = 0 + static constexpr ll neutral = 0; vector<vector<ll>> dst; ll* a; @@ -7,16 +7,16 @@ struct DisjointST { return x + y; } - void init(vector<ll> *vec) { - int n = sz(*vec); - a = vec->data(); + void init(vector<ll> &vec) { + int n = sz(vec); + a = vec.data(); dst.assign(__lg(n) + 1, vector<ll>(n + 1, neutral)); for (int h = 0, l = 1; l <= n; h++, l *= 2) { for (int c = l; c < n + l; c += 2 * l) { for (int i = c; i < min(n, c + l); i++) - dst[h][i + 1] = combine(dst[h][i], vec->at(i)); + dst[h][i + 1] = combine(dst[h][i], vec[i]); for (int i = min(n, c); i > c - l; i--) - dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]); + dst[h][i - 1] = combine(vec[i - 1], dst[h][i]); }}} ll query(int l, int r) { diff --git a/datastructures/stlHashMap.cpp b/datastructures/stlHashMap.cpp deleted file mode 100644 index b107dde..0000000 --- a/datastructures/stlHashMap.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include <ext/pb_ds/assoc_container.hpp> -using namespace __gnu_pbds; - -template<typename T> -struct betterHash { - size_t operator()(T o) const { - size_t h = hash<T>()(o) ^ 42394245; //random value - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h); - return h; -}}; - -template<typename K, typename V, typename H = betterHash<K>> -using hashMap = gp_hash_table<K, V, H>; -template<typename K, typename H = betterHash<K>> -using hashSet = gp_hash_table<K, null_type, H>; diff --git a/datastructures/stlPQ.cpp 
b/datastructures/stlPQ.cpp deleted file mode 100644 index 4e439f8..0000000 --- a/datastructures/stlPQ.cpp +++ /dev/null @@ -1,15 +0,0 @@ -#include <ext/pb_ds/priority_queue.hpp> -template<typename T> -// greater<T> für Min-Queue -using priorityQueue = __gnu_pbds::priority_queue<T, less<T>>; - -int main() { - priorityQueue<int> pq; - auto it = pq.push(5); // O(1) - pq.push(7); - pq.pop(); // O(log n) amortisiert - pq.modify(it, 6); // O(log n) amortisiert - pq.erase(it); // O(log n) amortisiert - priorityQueue<int> pq2; - pq.join(pq2); // O(1) -} diff --git a/datastructures/stlPriorityQueue.cpp b/datastructures/stlPriorityQueue.cpp deleted file mode 100644 index 32b2455..0000000 --- a/datastructures/stlPriorityQueue.cpp +++ /dev/null @@ -1,8 +0,0 @@ -#include <ext/pb_ds/priority_queue.hpp> -template<typename T> -using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>> - -auto it = pq.push(5); -pq.modify(it, 6); -pq.join(pq2); -// push, join are O(1), pop, modify, erase O(log n) amortized diff --git a/datastructures/stlTree.cpp b/datastructures/stlTree.cpp deleted file mode 100644 index fbb68b9..0000000 --- a/datastructures/stlTree.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include <ext/pb_ds/assoc_container.hpp> -#include <ext/pb_ds/tree_policy.hpp> -using namespace std; using namespace __gnu_pbds; -template<typename T> -using Tree = tree<T, null_type, less<T>, rb_tree_tag, - tree_order_statistics_node_update>; - -int main() { - Tree<int> X; - for (int i : {1, 2, 4, 8, 16}) X.insert(i); - *X.find_by_order(3); // => 8 - X.order_of_key(10); // => 4 = min i, mit X[i] >= 10 -} diff --git a/datastructures/test/fenwickTree.cpp b/datastructures/test/fenwickTree.cpp new file mode 100644 index 0000000..f9dd619 --- /dev/null +++ b/datastructures/test/fenwickTree.cpp @@ -0,0 +1,26 @@ +#include "../fenwickTree.cpp" + +void test(int n) { + vector<ll> naive(n); + init(n); + + for (int i = 0; i < 1000; i++) { + int p = util::randint(n); + ll delta = util::randint(); + update(p, 
delta); + naive[p] += delta; + + int r = util::randint(n+1); + + ll naive_result = 0; + for (int i = 0; i < r; i++) naive_result += naive[i]; + ll fenwick_result = prefix_sum(r); + assert(naive_result == fenwick_result); + } +} + +int main() { + test(1); + test(100); + test(1000); +} diff --git a/datastructures/test/fenwickTree2.cpp b/datastructures/test/fenwickTree2.cpp new file mode 100644 index 0000000..18ebcb7 --- /dev/null +++ b/datastructures/test/fenwickTree2.cpp @@ -0,0 +1,28 @@ +#include "../fenwickTree2.cpp" + +void test(int n) { + vector<ll> naive(n); + for (int i = 0; i < n; i++) naive[i] = util::randint(); + init(naive); + + for (int i = 0; i < 1000; i++) { + int l = util::randint(n), r = util::randint(n); + if (l > r) swap(l, r); + ll delta = util::randint(); + update(l, r, delta); + for (int i = l; i < r; i++) naive[i] += delta; + + r = util::randint(n+1); + + ll naive_result = 0; + for (int i = 0; i < r; i++) naive_result += naive[i]; + ll fenwick_result = prefix_sum(r); + assert(naive_result == fenwick_result); + } +} + +int main() { + test(1); + test(100); + test(1000); +} diff --git a/datastructures/test/lazyPropagation.awk b/datastructures/test/lazyPropagation.awk new file mode 100644 index 0000000..fc39305 --- /dev/null +++ b/datastructures/test/lazyPropagation.awk @@ -0,0 +1,86 @@ + +/Neutral element for combine/ { + print "#ifndef SEGTREE_FIRST_NEG" + print "# ifndef SEGTREE_MAX" + print + print "# else" + tmp = $0 + sub(/0/, "numeric_limits<T>::min()", tmp) + print tmp + print "# endif" + print "#else" + sub(/0/, "numeric_limits<T>::max()") + print + print "#endif" + next +} + +/Modify this \+ E/ { + print "#ifndef SEGTREE_FIRST_NEG" + print "# ifndef SEGTREE_MAX" + print + print "# else" + tmp = $0 + sub(/a \+ b/, "max(a, b)", tmp) + print tmp + print "# endif" + print "#else" + sub(/a \+ b/, "a < 0 ? 
a : min(a, b)") + print + print "#endif" + next +} + +/Unused value by updates/ { + print "#ifndef SEGTREE_FIRST_NEG" + print + print "#else" + sub(/0/, /numeric_limits<U>::max()/) + print + print "#endif" + next +} + +/And this \+ UF/ { + print + getline set_tree + getline set_lazy + print "#ifndef SEGTREE_MAX" + print "# ifndef SEGTREE_FIRST_NEG" + print set_tree + print "# else" + tmp = set_tree + sub(/val \* k\[i\]/, "val", tmp) + print tmp + print "# endif" + print set_lazy + print "#else" + sub(/= val \* k\[i\]/, "+= val", set_tree) + sub(/= val/, "+= val", set_lazy) + print set_tree + print set_lazy + print "#endif" + next +} + +/Optional/ { print "#ifdef SEGTREE_MAX" } +/^\};$/ { print "#endif" } + +/SegTree\(const vector<T>& a\)/ { + print "#ifndef SEGTREE_INIT_DEFAULT" + print + print "#else" + getline + sub(/\/\//, "") + print + print "#endif" + getline + print + print "#ifndef SEGTREE_INIT_DEFAULT" + getline + print + print "#endif" + next +} + +{ print } diff --git a/datastructures/test/lazyPropagation.cpp b/datastructures/test/lazyPropagation.cpp new file mode 100644 index 0000000..df97b14 --- /dev/null +++ b/datastructures/test/lazyPropagation.cpp @@ -0,0 +1,102 @@ +#include "lazyPropagation.tmp.cpp" + +void test(int n) { +#ifndef SEGTREE_INIT_DEFAULT + vector<ll> a(n); + for (ll &x: a) x = util::randint(); + SegTree seg(a); +#else + ll init = util::randint(); +# ifdef SEGTREE_FIRST_NEG + init = abs(init); +# endif + vector<ll> a(n, init); + SegTree seg(n, init); +#endif + for (int i = 0; i < 5*n; i++) { + { + int l = util::randint(n+1); + int r = util::randint(n+1); + if (l > r) swap(l, r); + ll v = util::randint(); +#ifndef SEGTREE_FIRST_NEG +# ifndef SEGTREE_MAX + if (v == 0) v = 1; +# endif +#endif + for (int j = l; j < r; j++) { +#ifndef SEGTREE_MAX + a[j] = v; +#else + a[j] += v; +#endif + } + seg.update(l, r, v); + } + { + int l = util::randint(n+1); + int r = util::randint(n+1); + if (l > r) swap(l, r); +#ifndef SEGTREE_FIRST_NEG +# ifndef 
SEGTREE_MAX + ll comp = 0; +# else + ll comp = numeric_limits<ll>::min(); +# endif +#else + ll comp = numeric_limits<ll>::max(); +#endif + for (int j = l; j < r; j++) { +#ifndef SEGTREE_FIRST_NEG +# ifndef SEGTREE_MAX + comp += a[j]; +# else + comp = max(comp, a[j]); +# endif +#else + if (comp >= 0 && comp > a[j]) comp = a[j]; +#endif + } + assert(seg.query(l, r) == comp); + } +#ifdef SEGTREE_MAX + { + int l = util::randint(n+1); + int r = util::randint(n+1); + if (l > r) swap(l, r); + ll bound = util::randint(); + int found = -1; + for (int j = l; j < r; j++) { + if (a[j] >= bound) { + found = j; + break; + } + } + assert(seg.lower_bound(l, r, bound) == found); + } +#endif + } +} + +int main() { + test(1000); + test(1); + { +#ifndef SEGTREE_INIT_DEFAULT + vector<ll> a; + SegTree seg(a); +#else + SegTree seg(0); +#endif + seg.update(0, 0, util::randint()); +#ifndef SEGTREE_FIRST_NEG +# ifndef SEGTREE_MAX + assert(seg.query(0, 0) == 0); +# else + assert(seg.query(0, 0) == numeric_limits<ll>::min()); +# endif +#else + assert(seg.query(0, 0) == numeric_limits<ll>::max()); +#endif + } +} diff --git a/datastructures/test/monotonicConvexHull.cpp b/datastructures/test/monotonicConvexHull.cpp new file mode 100644 index 0000000..08927a2 --- /dev/null +++ b/datastructures/test/monotonicConvexHull.cpp @@ -0,0 +1,27 @@ +#define sz(X) ((int)size(X)) +#include "../monotonicConvexHull.cpp" + +int main() { + { + Envelope env; + env.add(10, 0); + assert(env.query(0) == 0); + assert(env.query(1) == 10); + env.add(8, 5); + assert(env.query(1) == 10); + assert(env.query(2) == 20); + assert(env.query(3) == 29); + env.add(7, 10); + assert(env.query(10) == 80); + env.add(0, 0); + assert(env.query(11) == 0); + } + + { + Envelope env; + env.add(1, 0); + env.add(0, 10); + env.add(-1, 10); + assert(env.query(7) == 3); + } +} diff --git a/datastructures/test/persistent.cpp b/datastructures/test/persistent.cpp new file mode 100644 index 0000000..5e5f864 --- /dev/null +++ 
b/datastructures/test/persistent.cpp @@ -0,0 +1,11 @@ +#define all(X) begin(X), end(X) +#include "../persistent.cpp" + +int main() { + int time = 0; + persistent<int> p(time, 0); + p.set(1); + int t1 = time; + p.set(2); + assert(p.get(t1) == 1); +} diff --git a/datastructures/test/segmentTree.awk b/datastructures/test/segmentTree.awk new file mode 100644 index 0000000..e863d4e --- /dev/null +++ b/datastructures/test/segmentTree.awk @@ -0,0 +1,74 @@ + +/Neutral element for combine/ { + print "#ifndef SEGTREE_MUL" + print "# ifndef SEGTREE_FIRST_NEG" + print + print "# else" + tmp = $0 + sub(/0/, "numeric_limits<ll>::max()", tmp) + print tmp + print "# endif" + print "#else" + sub(/0/, "1") + print + print "#endif" + next +} + +/modify this \+ neutral/ { + print "#ifndef SEGTREE_MUL" + print "# ifndef SEGTREE_FIRST_NEG" + print + print "# else" + tmp = $0 + sub(/a \+ b/, "a < 0 ? a : min(a, b)", tmp) + print tmp + print "# endif" + print "#else" + sub(/a \+ b/, "a*b % MOD") + print + print "#endif" + next +} + +/SegTree\(vector<T>& a\)/ { + print "#ifndef SEGTREE_INIT_DEFAULT" + print + getline + print + print "#else" + getline + sub(/\/\//, "") + print + getline + sub(/\/\//, "") + print + print "#endif" + next +} + +/remove for range update/ { + print "#ifndef SEGTREE_RANGE_UPDATE" + print + getline + print + getline + print "\t\t}" + print "#endif" + print "\t}" + next +} + +/void update/ { + print "#ifndef SEGTREE_RANGE_UPDATE" +} + +/OR: range update/ { + print "#else" +} + +/^\};$/ { + print "#endif" +} + +{ print } diff --git a/datastructures/test/segmentTree.cpp b/datastructures/test/segmentTree.cpp new file mode 100644 index 0000000..44d99b6 --- /dev/null +++ b/datastructures/test/segmentTree.cpp @@ -0,0 +1,100 @@ +#ifdef SEGTREE_MUL +constexpr ll MOD = 1'000'000'007; +#endif + +#include "segmentTree.tmp.cpp" + +void test(int n) { +#ifndef SEGTREE_INIT_DEFAULT + vector<ll> a(n); + for (ll &x: a) x = util::randint(); + SegTree seg(a); +#else + ll init = 
util::randint(); +# ifdef SEGTREE_FIRST_NEG + init = abs(init); +# endif + vector<ll> a(n, init); + SegTree seg(n, init); +#endif + for (int i = 0; i < 5*n; i++) { + { +#ifndef SEGTREE_RANGE_UPDATE + int j = util::randint(n); + ll v = util::randint(); + a[j] = v; + seg.update(j, v); +#else + int l = util::randint(n+1); + int r = util::randint(n+1); + if (l > r) swap(l, r); + ll v = util::randint(); + for (int j = l; j < r; j++) { +# ifndef SEGTREE_MUL + a[j] += v; +# else + a[j] = a[j]*v % MOD; +# endif + } + seg.modify(l, r, v); +#endif + } + { +#ifndef SEGTREE_RANGE_UPDATE + int l = util::randint(n+1); + int r = util::randint(n+1); + if (l > r) swap(l, r); +# ifndef SEGTREE_MUL +# ifndef SEGTREE_FIRST_NEG + ll comp = 0; +# else + ll comp = numeric_limits<ll>::max(); +# endif +# else + ll comp = 1; +# endif + for (int j = l; j < r; j++) { +# ifndef SEGTREE_MUL +# ifndef SEGTREE_FIRST_NEG + comp += a[j]; +# else + if (comp >= 0 && comp > a[j]) comp = a[j]; +# endif +# else + comp = comp * a[j] % MOD; +# endif + } + assert(seg.query(l, r) == comp); +#else + int j = util::randint(n); + assert(seg.query(j) == a[j]); +#endif + } + } +} + +int main() { + test(1000); + test(1); + { +#ifndef SEGTREE_INIT_DEFAULT + vector<ll> a; + SegTree seg(a); +#else + SegTree seg(0); +#endif +#ifndef SEGTREE_RANGE_UPDATE +# ifndef SEGTREE_MUL +# ifndef SEGTREE_FIRST_NEG + assert(seg.query(0, 0) == 0); +# else + assert(seg.query(0, 0) == numeric_limits<ll>::max()); +# endif +# else + assert(seg.query(0, 0) == 1); +# endif +#else + seg.modify(0, 0, util::randint()); +#endif + } +} diff --git a/datastructures/test/sparseTable.cpp b/datastructures/test/sparseTable.cpp new file mode 100644 index 0000000..03ef548 --- /dev/null +++ b/datastructures/test/sparseTable.cpp @@ -0,0 +1,26 @@ +#define sz ssize +#define all(X) begin(X), end(X) +#include "../sparseTable.cpp" + +void test(int n) { + vector<ll> values(n); + for (ll &x: values) x = util::randint(); + SparseTable st; + st.init(values); + 
for (int i = 0; i < n; i++) { + int l = util::randint(n); + int r = util::randint(n); + if (l > r) swap(l, r); + r++; + assert( + values[st.queryIdempotent(l, r)] + == + *min_element(values.begin()+l, values.begin()+r) + ); + } +} + +int main() { + test(1000); + test(1); +} diff --git a/datastructures/test/sparseTableDisjoint.cpp b/datastructures/test/sparseTableDisjoint.cpp new file mode 100644 index 0000000..7ef6483 --- /dev/null +++ b/datastructures/test/sparseTableDisjoint.cpp @@ -0,0 +1,26 @@ +#define sz ssize +#define all(X) begin(X), end(X) +#include "../sparseTableDisjoint.cpp" + +void test(int n) { + vector<ll> values(n); + for (ll &x: values) x = util::randint(); + DisjointST st; + st.init(values); + for (int i = 0; i < n; i++) { + int l = util::randint(n); + int r = util::randint(n); + if (l > r) swap(l, r); + r++; + assert( + st.query(l, r) + == + accumulate(values.begin()+l, values.begin()+r, 0ll) + ); + } +} + +int main() { + test(1000); + test(1); +} diff --git a/datastructures/test/treap2.cpp b/datastructures/test/treap2.cpp new file mode 100644 index 0000000..1a435d3 --- /dev/null +++ b/datastructures/test/treap2.cpp @@ -0,0 +1,41 @@ +#include "../treap2.cpp" + +void test(int n) { + Treap treap; + vector<ll> dumb; + for (int i = 0; i < n; i++) { + assert(treap.getSize(treap.root) == ssize(dumb)); + int j = util::randint(ssize(dumb) + 1); + ll x = util::randint(); + treap.insert(j, x); + dumb.insert(begin(dumb) + j, x); + } + for (int i = 0; i < 5*n; i++) { + assert(treap.getSize(treap.root) == ssize(dumb)); + { + int j = util::randint(ssize(dumb)); + treap.remove(j); + dumb.erase(begin(dumb) + j); + } + { + int j = util::randint(ssize(dumb) + 1); + ll x = util::randint(); + treap.insert(j, x); + dumb.insert(begin(dumb) + j, x); + } + } + for (int i = 0; i < n; i++) { + assert(treap.getSize(treap.root) == ssize(dumb)); + int j = util::randint(ssize(dumb)); + treap.remove(j); + dumb.erase(begin(dumb) + j); + } + assert(treap.root < 0); + 
assert(empty(dumb)); +} + +int main() { + test(1000); + test(1); + test(0); +} diff --git a/datastructures/test/waveletTree.cpp b/datastructures/test/waveletTree.cpp new file mode 100644 index 0000000..c5da1d2 --- /dev/null +++ b/datastructures/test/waveletTree.cpp @@ -0,0 +1,38 @@ +#include "../waveletTree.cpp" + +void test(int n) { + vector<ll> values(n); + for (ll &x: values) x = util::randint(); + vector<ll> backup = values; + WaveletTree wave(values); + assert(values == backup); + for (int i = 0; i < n; i++) { + int l = util::randint(n+1); + int r = util::randint(n+1); + if (l > r) swap(l, r); + ll bound = util::randint(); + int res = 0; + for (ll x: values | views::take(r) | views::drop(l)) { + if (x < bound) res++; + } + assert(wave.countSmaller(l, r, bound) == res); + } + for (int i = 0; 5*i < n; i++) { + int l = util::randint(n); + int m = util::randint(n); + int r = util::randint(n); + if (l > m) swap(l, m); + if (m > r) swap(m, r); + if (l > m) swap(l, m); + r++; + int k = m-l; + vector<ll> part(values.begin()+l, values.begin()+r); + ranges::nth_element(part, part.begin() + k); + assert(wave.kth(l, r, k) == part[k]); + } +} + +int main() { + test(1000); + test(1); +} diff --git a/datastructures/treap2.cpp b/datastructures/treap2.cpp index 10168ca..e40da0d 100644 --- a/datastructures/treap2.cpp +++ b/datastructures/treap2.cpp @@ -3,7 +3,7 @@ struct Treap { struct Node { ll val; int prio, size = 1, l = -1, r = -1; - Node (ll x) : val(x), prio(rng()) {} + Node(ll x): val(x), prio(rng()) {} }; vector<Node> treap; @@ -15,35 +15,35 @@ struct Treap { void upd(int v) { if (v < 0) return; - auto *V = &treap[v]; - V->size = 1 + getSize(V->l) + getSize(V->r); + auto &V = treap[v]; + V.size = 1 + getSize(V.l) + getSize(V.r); // Update Node Code } void push(int v) { if (v < 0) return; - //auto *V = &treap[v]; - //if (V->lazy) { + //auto &V = treap[v]; + //if (V.lazy) { // Lazy Propagation Code - // if (V->l >= 0) treap[V->l].lazy = true; - // if (V->r >= 0) 
treap[V->r].lazy = true; - // V->lazy = false; + // if (V.l >= 0) treap[V.l].lazy = true; + // if (V.r >= 0) treap[V.r].lazy = true; + // V.lazy = false; //} } pair<int, int> split(int v, int k) { if (v < 0) return {-1, -1}; - auto *V = &treap[v]; + auto &V = treap[v]; push(v); - if (getSize(V->l) >= k) { // "V->val >= k" for lower_bound(k) - auto [left, right] = split(V->l, k); - V->l = right; + if (getSize(V.l) >= k) { // "V.val >= k" for lower_bound(k) + auto [left, right] = split(V.l, k); + V.l = right; upd(v); return {left, v}; } else { // and only "k" - auto [left, right] = split(V->r, k - getSize(V->l) - 1); - V->r = left; + auto [left, right] = split(V.r, k - getSize(V.l) - 1); + V.r = left; upd(v); return {v, right}; }} @@ -66,7 +66,7 @@ struct Treap { void insert(int i, ll val) { // and i = val auto [left, right] = split(root, i); treap.emplace_back(val); - left = merge(left, sz(treap) - 1); + left = merge(left, ssize(treap) - 1); root = merge(left, right); } diff --git a/datastructures/unionFind2.cpp b/datastructures/unionFind2.cpp index 5362c4d..d1b9162 100644 --- a/datastructures/unionFind2.cpp +++ b/datastructures/unionFind2.cpp @@ -1,25 +1,26 @@ -vector<int> uf; +// unions[i] >= 0 => unions[i] = parent +// unions[i] < 0 => unions[i] = -size +vector<int> unions; -init(int N) { - uf.assign(N,-1); //-1 indicates that every subset has size 1 +init(int n) { + unions.assign(n, -1); } int findSet(int i) { - if(uf[i] < 0) return i; //If uf[i] < 0 we have reach a root - uf[i] = findSet(uf[i]); //Path-Compression - return uf[i]; + if (unions[i] < 0) return i; + return unions[i] = findSet(unions[i]); } -void linkSets(int i, int j) { - //Take |uf[i]|, where i must be a root, to get the size - //of the subset - if(abs(uf[i]) < abs(uf[j])) { //Union-by-size. - uf[j] += uf[i]; uf[i] = j; - } else { - uf[i] += uf[j]; uf[j] = i; - } +void linkSets(int a, int b) { // Union by size. 
+ if (unions[b] > unions[a]) swap(a, b); + unions[b] += unions[a]; + unions[a] = b; } -void unionSets(int i, int j) { - if(findSet(i) != findSet(j)) linkSets(findSet(i),findSet(j)); +void unionSets(int a, int b) { + if (findSet(a) != findSet(b)) linkSets(findSet(a),findSet(b)); +} + +void getSize(int i) { + return -unions[findSet(i)]; } diff --git a/datastructures/waveletTree.cpp b/datastructures/waveletTree.cpp index 476658e..95ff207 100644 --- a/datastructures/waveletTree.cpp +++ b/datastructures/waveletTree.cpp @@ -1,25 +1,20 @@ struct WaveletTree { - using it = vector<ll>::iterator; - WaveletTree *ln = nullptr, *rn = nullptr; + unique_ptr<WaveletTree> ln, rn; vector<int> b = {0}; ll lo, hi; - WaveletTree(vector<ll> in) : WaveletTree(all(in)) {} - - WaveletTree(it from, it to) : // call above one - lo(*min_element(from, to)), hi(*max_element(from, to) + 1) { + WaveletTree(auto in) : lo(*ranges::min_element(in)), + hi(*ranges::max_element(in) + 1) { ll mid = (lo + hi) / 2; - auto f = [&](ll x) {return x < mid;}; - for (it c = from; c != to; c++) { - b.push_back(b.back() + f(*c)); - } + auto f = [&](ll x) { return x < mid; }; + for (ll x: in) b.push_back(b.back() + f(x)); if (lo + 1 >= hi) return; - it pivot = stable_partition(from, to, f); - ln = new WaveletTree(from, pivot); - rn = new WaveletTree(pivot, to); + auto right = ranges::stable_partition(in, f); + ln = make_unique<WaveletTree>( + ranges::subrange(begin(in), begin(right))); + rn = make_unique<WaveletTree>(right); } - // kth element in sort[l, r) all 0-indexed ll kth(int l, int r, int k) { if (l >= r || k >= r - l) return -1; if (lo + 1 >= hi) return lo; @@ -28,13 +23,10 @@ struct WaveletTree { else return rn->kth(l-b[l], r-b[r], k-inLeft); } - // count elements in[l, r) smaller than k int countSmaller(int l, int r, ll k) { if (l >= r || k <= lo) return 0; if (hi <= k) return r - l; return ln->countSmaller(b[l], b[r], k) + rn->countSmaller(l-b[l], r-b[r], k); } - - ~WaveletTree() {delete ln; delete rn;} 
}; diff --git a/geometry/circle.cpp b/geometry/circle.cpp index 8ebc800..fab4150 100644 --- a/geometry/circle.cpp +++ b/geometry/circle.cpp @@ -1,4 +1,4 @@ -// berechnet die Schnittpunkte von zwei kreisen +// berechnet die Schnittpunkte von zwei Kreisen // (Kreise dürfen nicht gleich sein!) vector<pt> circleIntersection(pt c1, double r1, pt c2, double r2) { @@ -13,7 +13,7 @@ vector<pt> circleIntersection(pt c1, double r1, } // berechnet die Schnittpunkte zwischen -// einem Kreis(Kugel) und einer Grade 2d und 3d +// einem Kreis(Kugel) und einer Grade (2D und 3D) vector<pt> circleRayIntersection(pt center, double r, pt orig, pt dir) { vector<pt> result; @@ -22,7 +22,7 @@ vector<pt> circleRayIntersection(pt center, double r, double c = dot(orig - center, orig - center) - r * r; double discr = b * b - 4 * a * c; if (discr >= 0) { - //t in [0, 1] => schnitt mit segment [orig, orig + dir] + //t in [0, 1] => Schnitt mit Segment [orig, orig + dir] double t1 = -(b + sqrt(discr)) / (2 * a); double t2 = -(b - sqrt(discr)) / (2 * a); if (t1 >= 0) result.push_back(t1 * dir + orig); diff --git a/geometry/formulars.cpp b/geometry/formulas.cpp index 22e9e32..22e9e32 100644 --- a/geometry/formulars.cpp +++ b/geometry/formulas.cpp diff --git a/geometry/formulars3d.cpp b/geometry/formulas3d.cpp index 84e17c0..84e17c0 100644 --- a/geometry/formulars3d.cpp +++ b/geometry/formulas3d.cpp diff --git a/geometry/geometry.tex b/geometry/geometry.tex index 95c0adb..57c8092 100644 --- a/geometry/geometry.tex +++ b/geometry/geometry.tex @@ -15,12 +15,12 @@ \sourcecode{geometry/antipodalPoints.cpp} \end{algorithm} -\begin{algorithm}{Konvexehülle} +\begin{algorithm}{Konvexe Hülle} \begin{methods} \method{convexHull}{berechnet konvexe Hülle}{n\*\log(n)} \end{methods} \begin{itemize} - \item Konvexe Hülle gegen den Uhrzeigersinn sortiert + \item konvexe Hülle gegen den Uhrzeigersinn sortiert \item nur Eckpunkte enthalten(für alle Punkte = im CCW Test entfernen) \item erster und letzter Punkt sind 
identisch \end{itemize} @@ -28,7 +28,7 @@ \end{algorithm} \subsection{Formeln~~--~\texttt{std::complex}} -\sourcecode{geometry/formulars.cpp} +\sourcecode{geometry/formulas.cpp} \sourcecode{geometry/linesAndSegments.cpp} \sourcecode{geometry/sortAround.cpp} \input{geometry/triangle} @@ -36,11 +36,11 @@ \sourcecode{geometry/polygon.cpp} \sourcecode{geometry/circle.cpp} -\subsection{Formeln - 3D} -\sourcecode{geometry/formulars3d.cpp} +\subsection{Formeln -- 3D} +\sourcecode{geometry/formulas3d.cpp} \optional{ - \subsection{3D-Kugeln} + \subsection{3D-Kugeln \opthint} \sourcecode{geometry/spheres.cpp} } @@ -48,15 +48,22 @@ \sourcecode{geometry/hpi.cpp} \end{algorithm} +\begin{algorithm}[optional]{Intersecting Segments} + \begin{methods} + \method{intersect}{finds ids of intersecting segments}{n\*\log(n)} + \end{methods} + \sourcecode{geometry/segmentIntersection.cpp} +\end{algorithm} + \begin{algorithm}[optional]{Delaunay Triangulierung} \begin{methods} \method{delaunay}{berechnet Triangulierung}{n\*\log(n)} \end{methods} - \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Traingulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. + \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Triangulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. \sourcecode{geometry/delaunay.cpp} \end{algorithm} \optional{ -\subsection{Geraden} +\subsection{Geraden \opthint} \sourcecode{geometry/lines.cpp} } diff --git a/geometry/spheres.cpp b/geometry/spheres.cpp index abffde5..b5d3644 100644 --- a/geometry/spheres.cpp +++ b/geometry/spheres.cpp @@ -1,4 +1,4 @@ -// Great Cirlce Distance mit Längen- und Breitengrad. +// Great Circle Distance mit Längen- und Breitengrad. 
double gcDist(double pLat, double pLon, double qLat, double qLon, double radius) { pLat *= PI / 180; pLon *= PI / 180; @@ -10,7 +10,7 @@ double gcDist(double pLat, double pLon, sin(pLat) * sin(qLat)); } -// Great Cirlce Distance mit kartesischen Koordinaten. +// Great Circle Distance mit kartesischen Koordinaten. double gcDist(point p, point q) { return acos(p.x * q.x + p.y * q.y + p.z * q.z); } diff --git a/graph/LCA.cpp b/graph/LCA.cpp deleted file mode 100644 index 7debf8f..0000000 --- a/graph/LCA.cpp +++ /dev/null @@ -1,24 +0,0 @@ -vector<vector<int>> adj(); -vector<int> visited(); -vector<int> first(); -vector<int> depth(); - -void initLCA(int gi, int d, int& c) { - visited[c] = gi, depth[c] = d, first[gi] = min(c, first[gi]), c++; - for(int gn : adj[gi]) { - initLCA(gn, d+1, c); - visited[c] = gi, depth[c] = d, c++; -}} - -int getLCA(int a, int b) { - return visited[query(min(first[a], first[b]), max(first[a], first[b]))]; -} - -void exampleUse() { - int c = 0; - visited = vector<int>(2*sz(adj)); - first = vector<int>(sz(adj), 2*sz(adj)); - depth = vector<int>(2*sz(adj)); - initLCA(0, 0, c); - init(depth); -} diff --git a/graph/LCA_sparse.cpp b/graph/LCA_sparse.cpp index 649e697..3e87cde 100644 --- a/graph/LCA_sparse.cpp +++ b/graph/LCA_sparse.cpp @@ -10,16 +10,16 @@ struct LCA { first.assign(sz(adj), 2 * sz(adj)); idx = 0; dfs(adj, root); - st.init(&depth); + st.init(depth); } - void dfs(vector<vector<int>>& adj, int v, ll d=0, int p=-1) { + void dfs(vector<vector<int>>& adj, int v, ll d=0) { visited[idx] = v, depth[idx] = d; first[v] = min(idx, first[v]), idx++; for (int u : adj[v]) { if (first[u] == 2 * sz(adj)) { - dfs(adj, u, d + 1, v); + dfs(adj, u, d + 1); visited[idx] = v, depth[idx] = d, idx++; }}} diff --git a/graph/binary_lifting.cpp b/graph/binary_lifting.cpp new file mode 100644 index 0000000..0b8c218 --- /dev/null +++ b/graph/binary_lifting.cpp @@ -0,0 +1,28 @@ +struct Lift { + vector<int> dep, par, jmp; + + Lift(vector<vector<int>> &adj, int 
root): + dep(adj.size()), par(adj.size()), jmp(adj.size(), root) { + function<void(int,int,int)> dfs = [&](int u, int p, int d) { + dep[u] = d, par[u] = p; + jmp[u] = dep[p] + dep[jmp[jmp[p]]] == 2*dep[jmp[p]] + ? jmp[jmp[p]] : p; + for (int v: adj[u]) if (v != p) dfs(v, u, d+1); + }; + dfs(root, root, 0); + } + + int depth(int v) { return dep[v]; } + int lift(int v, int d) { + while (dep[v] > d) v = dep[jmp[v]] < d ? par[v] : jmp[v]; + return v; + } + int lca(int u, int v) { + v = lift(v, dep[u]), u = lift(u, dep[v]); + while (u != v) { + auto &a = jmp[u] == jmp[v] ? par : jmp; + u = a[u], v = a[v]; + } + return u; + } +}; diff --git a/graph/cycleCounting.cpp b/graph/cycleCounting.cpp index bd7a219..00ecc3a 100644 --- a/graph/cycleCounting.cpp +++ b/graph/cycleCounting.cpp @@ -1,12 +1,12 @@ constexpr int maxEdges = 128; using cycle = bitset<maxEdges>; -struct cylces { +struct cycles { vector<vector<pair<int, int>>> adj; vector<bool> seen; vector<cycle> paths, base; vector<pair<int, int>> edges; - cylces(int n) : adj(n), seen(n), paths(n) {} + cycles(int n) : adj(n), seen(n), paths(n) {} void addEdge(int u, int v) { adj[u].push_back({v, sz(edges)}); diff --git a/graph/graph.tex b/graph/graph.tex index 9232090..9a0dc24 100644 --- a/graph/graph.tex +++ b/graph/graph.tex @@ -1,12 +1,5 @@ \section{Graphen} -\begin{algorithm}{Kruskal} - \begin{methods}[ll] - berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ - \end{methods} - \sourcecode{graph/kruskal.cpp} -\end{algorithm} - \begin{algorithm}{Minimale Spannbäume} \paragraph{Schnitteigenschaft} Für jeden Schnitt $C$ im Graphen gilt: @@ -16,6 +9,12 @@ \paragraph{Kreiseigenschaft} Für jeden Kreis $K$ im Graphen gilt: Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. 
+ + \subsection{\textsc{Kruskal}} + \begin{methods}[ll] + berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ + \end{methods} + \sourcecode{graph/kruskal.cpp} \end{algorithm} \begin{algorithm}{Heavy-Light Decomposition} @@ -28,7 +27,7 @@ \sourcecode{graph/hld.cpp} \end{algorithm} -\begin{algorithm}{Lowest Common Ancestor} +\begin{algorithm}[optional]{Lowest Common Ancestor} \begin{methods} \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} \method{getLCA}{findet LCA}{1} @@ -37,6 +36,17 @@ \sourcecode{graph/LCA_sparse.cpp} \end{algorithm} +\begin{algorithm}{Binary Lifting} + % https://codeforces.com/blog/entry/74847 + \begin{methods} + \method{Lift}{constructor}{\abs{V}} + \method{depth}{distance to root of vertex $v$}{1} + \method{lift}{vertex above $v$ at depth $d$}{\log(\abs{V})} + \method{lca}{lowest common ancestor of $u$ and $v$}{\log(\abs{V})} + \end{methods} + \sourcecode{graph/binary_lifting.cpp} +\end{algorithm} + \begin{algorithm}{Centroids} \begin{methods} \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} @@ -99,7 +109,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/connect.cpp} \end{algorithm} -\begin{algorithm}{Erd\H{o}s-Gallai} +\begin{algorithm}{\textsc{Erd\H{o}s-Gallai}} Sei $d_1 \geq \cdots \geq d_{n}$. 
Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ \begin{methods} \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} @@ -169,7 +179,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/virtualTree.cpp} \end{algorithm} -\begin{algorithm}{Maximal Cardinatlity Bipartite Matching} +\begin{algorithm}{Maximum Cardinality Bipartite Matching} \label{kuhn} \begin{methods} \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} @@ -195,7 +205,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \subsection{Max-Flow} \optional{ -\subsubsection{Capacity Scaling} +\subsubsection{Capacity Scaling \opthint} \begin{methods} \method{maxFlow}{gut bei dünn besetzten Graphen.}{\abs{E}^2\*\log(C)} \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} @@ -204,7 +214,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da } \optional{ -\subsubsection{Push Relabel} +\subsubsection{Push Relabel \opthint} \begin{methods} \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} @@ -212,24 +222,23 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da \sourcecode{graph/pushRelabel.cpp} } +\subsubsection{\textsc{Dinic}'s Algorithm mit Capacity Scaling} +\begin{methods} + \method{maxFlow}{doppelt so schnell wie \textsc{Ford-Fulkerson}}{\abs{V}^2\cdot\abs{E}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/dinicScaling.cpp} + \begin{algorithm}{Min-Cost-Max-Flow} \begin{methods} \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} \end{methods} \sourcecode{graph/minCostMaxFlow.cpp} \end{algorithm} - 
-\subsubsection{Dinic's Algorithm mit Capacity Scaling} -\begin{methods} - \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} - \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} -\end{methods} -\sourcecode{graph/dinicScaling.cpp} -\vfill\null \columnbreak \optional{ -\subsubsection{Anwendungen} +\subsubsection{Anwendungen \opthint} \begin{itemize} \item \textbf{Maximum Edge Disjoint Paths}\newline Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen. diff --git a/graph/test/LCA_sparse.cpp b/graph/test/LCA_sparse.cpp new file mode 100644 index 0000000..c8b0017 --- /dev/null +++ b/graph/test/LCA_sparse.cpp @@ -0,0 +1,63 @@ +#include "util.cpp" +#define all(X) begin(X), end(X) +#define sz ssize +#include "../../datastructures/sparseTable.cpp" +#include "../LCA_sparse.cpp" + +void test(vector<vector<int>> &adj, int root) { + int n = adj.size(); + + vector<int> parent(n); + function<void(int,int)> dfs = [&](int u, int p) { + parent[u] = p; + for (int v: adj[u]) if (v != p) dfs(v, u); + }; + dfs(root, -1); + + function<bool(int, int)> is_ancestor = [&](int u, int v) { + while (v != -1 && u != v) v = parent[v]; + return u == v; + }; + function<int(int)> depth = [&](int v) { + int r = 0; + while ((v = parent[v]) != -1) r++; + return r; + }; + + LCA lca; + lca.init(adj, root); + for (int i = 0; i < n; i++) assert(lca.getDepth(i) == depth(i)); + for (int i = 0; i < 1000; i++) { + int u = util::randint(n); + int v = util::randint(n); + int l = lca.getLCA(u, v); + assert(is_ancestor(l, u)); + assert(is_ancestor(l, v)); + for (int p = parent[l]; int c: adj[l]) { + if (c == p) continue; + assert(!is_ancestor(c, u) || !is_ancestor(c, v)); + } + } +} + +int main() { + { + // Single vertex + vector<vector<int>> adj(1); + test(adj, 0); + } + { + // Path + vector<vector<int>> adj = util::path(100); + int left = 0, mid = 40, right = 99; + util::shuffle_graph(adj, left, mid, right); + test(adj, left); + test(adj, mid); + 
test(adj, right); + } + { + // Random + vector<vector<int>> adj = util::random_tree(1000); + test(adj, 0); + } +} diff --git a/graph/test/binary_lifting.cpp b/graph/test/binary_lifting.cpp new file mode 100644 index 0000000..05b903a --- /dev/null +++ b/graph/test/binary_lifting.cpp @@ -0,0 +1,67 @@ +#include "util.cpp" +#include "../binary_lifting.cpp" + +void test(vector<vector<int>> &adj, int root) { + int n = adj.size(); + + vector<int> parent(n); + function<void(int,int)> dfs = [&](int u, int p) { + parent[u] = p; + for (int v: adj[u]) if (v != p) dfs(v, u); + }; + dfs(root, -1); + + function<bool(int, int)> is_ancestor = [&](int u, int v) { + while (v != -1 && u != v) v = parent[v]; + return u == v; + }; + function<int(int)> depth = [&](int v) { + int r = 0; + while ((v = parent[v]) != -1) r++; + return r; + }; + + Lift lift(adj, root); + for (int i = 0; i < n; i++) assert(lift.depth(i) == depth(i)); + for (int i = 0; i < 1000; i++) { + int v = util::randint(n); + int d = util::randint(n); + int u = lift.lift(v, d); + assert(is_ancestor(u, v)); + if (d <= depth(v)) assert(depth(u) == d); + else assert(u == v); + } + for (int i = 0; i < 1000; i++) { + int u = util::randint(n); + int v = util::randint(n); + int lca = lift.lca(u, v); + assert(is_ancestor(lca, u)); + assert(is_ancestor(lca, v)); + for (int p = parent[lca]; int c: adj[lca]) { + if (c == p) continue; + assert(!is_ancestor(c, u) || !is_ancestor(c, v)); + } + } +} + +int main() { + { + // Single vertex + vector<vector<int>> adj(1); + test(adj, 0); + } + { + // Path + vector<vector<int>> adj = util::path(100); + int left = 0, mid = 40, right = 99; + util::shuffle_graph(adj, left, mid, right); + test(adj, left); + test(adj, mid); + test(adj, right); + } + { + // Random + vector<vector<int>> adj = util::random_tree(1000); + test(adj, 0); + } +} diff --git a/graph/test/util.cpp b/graph/test/util.cpp new file mode 100644 index 0000000..8c14b5c --- /dev/null +++ b/graph/test/util.cpp @@ -0,0 +1,60 @@ + 
+namespace util { + +void shuffle_adj_lists(vector<vector<int>> &adj) { + for (auto &a: adj) ranges::shuffle(a, rd); +} + +vector<vector<int>> random_tree(int n) { + vector<vector<int>> adj(n); + + vector<vector<int>> components(n); + for (int i = 0; i < n; i++) components[i].push_back(i); + while (components.size() > 1) { + int c1 = randint(components.size()); + int c2 = randint(components.size()-1); + c2 += (c2 >= c1); + + int v1 = components[c1][randint(components[c1].size())]; + int v2 = components[c2][randint(components[c2].size())]; + + adj[v1].push_back(v2); + adj[v2].push_back(v1); + + if (components[c1].size() < components[c2].size()) swap(c1, c2); + for (int v: components[c2]) components[c1].push_back(v); + swap(components[c2], components.back()); + components.pop_back(); + } + + shuffle_adj_lists(adj); + return adj; +} + +vector<vector<int>> path(int n) { + vector<vector<int>> adj(n); + for (int i = 1; i < n; i++) { + adj[i-1].push_back(i); + adj[i].push_back(i-1); + } + return adj; +} + +template<typename... Ts> +void shuffle_graph(vector<vector<int>> &adj, Ts &...vertex) { + int n = adj.size(); + vector<int> perm(n); + iota(perm.begin(), perm.end(), 0); + ranges::shuffle(perm, rd); + + vector<vector<int>> new_adj(n); + for (int i = 0; i < n; i++) { + auto &a = new_adj[perm[i]] = move(adj[i]); + for (int &v: a) v = perm[v]; + } + adj = move(new_adj); + shuffle_adj_lists(adj); + ((vertex = perm[vertex]), ...); +} + +} diff --git a/latexHeaders/code.sty b/latexHeaders/code.sty index a889596..af7f817 100644 --- a/latexHeaders/code.sty +++ b/latexHeaders/code.sty @@ -1,3 +1,6 @@ +\usepackage{ocgx2} +\usepackage{fontawesome} + % Colors, used for syntax highlighting. % To print this document, set all colors to black! 
\usepackage{xcolor} @@ -101,6 +104,32 @@ % \addtocounter{lstnumber}{-1}% %} +\ifthenelse{\isundefined{\srclink}}{}{ + \lst@AddToHook{Init}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[t][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source links}{srclinks}{1}% + \hfill\href{\srclink{\lst@name}}{\faExternalLink}% + \end{ocg}% + \end{minipage}% + }% + } +} + +\lst@AddToHook{DeInit}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[b][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source file names}{srcfiles}{0}% + \hfill\textcolor{gray}{\lst@name}% + \end{ocg}% + \end{minipage}% + }% +} + \newenvironment{btHighlight}[1][] {\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}} {\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup} diff --git a/latexHeaders/commands.sty b/latexHeaders/commands.sty index edbba1b..73a7dca 100644 --- a/latexHeaders/commands.sty +++ b/latexHeaders/commands.sty @@ -7,6 +7,11 @@ \newcommand{\code}[1]{\lstinline[breaklines=true]{#1}} \let\codeSafe\lstinline +\ifoptional + \renewcommand{\columnbreak}{} + \newcommand\opthint{\textcolor{gray}{(optional)}} +\fi + \usepackage{tikz} \usetikzlibrary{angles,quotes} @@ -17,7 +22,7 @@ \ifthenelse{\equal{#1}{optional}}{% \optional{ \needspace{4\baselineskip}% - \subsection{#2\textcolor{gray}{(optional)}}% + \subsection{#2 \opthint}% #3% } }{% diff --git a/math/binomial0.cpp b/math/binomial0.cpp index 896a0f1..f37aea5 100644 --- a/math/binomial0.cpp +++ b/math/binomial0.cpp @@ -10,5 +10,5 @@ void precalc() { ll calc_binom(ll n, ll k) { if (n < 0 || n < k || k < 0) return 0; - return (inv[n] * inv[n-k] % mod) * fac[k] % mod; + return (fac[n] * inv[n-k] % mod) * inv[k] % mod; } diff --git a/math/linearRecurence.cpp b/math/linearRecurrence.cpp index 2501e64..2501e64 100644 --- a/math/linearRecurence.cpp +++ b/math/linearRecurrence.cpp diff --git a/math/math.tex b/math/math.tex index c157e1b..f5fbdca 
100644 --- a/math/math.tex +++ b/math/math.tex @@ -25,7 +25,7 @@ \end{methods}
\sourcecode{math/permIndex.cpp}
\end{algorithm}
-\clearpage
+\columnbreak
\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$}
%\vspace{-1.25em}
@@ -41,6 +41,11 @@ \item für $a > 10^9$ \code{__int128} oder \code{modMul} benutzten!
\end{itemize}
+\begin{algorithm}[optional]{Square root modulo prime}
+ \sourcecode{math/modSqrt.cpp}
+ \sourcecode{math/sqrtModCipolla.cpp}
+\end{algorithm}
+
\begin{algorithm}{ggT, kgV, erweiterter euklidischer Algorithmus}
\runtime{\log(a) + \log(b)}
\sourcecode{math/extendedEuclid.cpp}
@@ -142,7 +147,6 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \sourcecode{math/discreteNthRoot.cpp}
\end{algorithm}
-
\begin{algorithm}{Primitivwurzeln}
\begin{itemize}
\item Primitivwurzel modulo $n$ existiert $\Leftrightarrow$ $n \in \{2,\ 4,\ p^\alpha,\ 2\cdot p^\alpha \mid\ 2 < p \in \mathbb{P},\ \alpha \in \mathbb{N}\}$
@@ -158,21 +162,21 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \sourcecode{math/primitiveRoot.cpp}
\end{algorithm}
-\begin{algorithm}{Linearessieb und Multiplikative Funktionen}
+\begin{algorithm}{Lineares Sieb und multiplikative Funktionen}
Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ, wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$.
$\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen.
\begin{methods}
\method{sieve}{berechnet Primzahlen und co.}{N}
- \method{sieved}{Wert der endsprechenden Multiplikativen Funktion}{1}
+ \method{sieved}{Wert der entsprechenden multiplikativen Funktion}{1}
- \method{naive}{Wert der endsprechenden Multiplikativen Funktion}{\sqrt{n}}
+ \method{naive}{Wert der entsprechenden multiplikativen Funktion}{\sqrt{n}}
\end{methods}
\textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}!
\sourcecode{math/linearSieve.cpp}
- \textbf{\textsc{Möbius}-Funtkion:}
+ \textbf{\textsc{Möbius}-Funktion:}
\begin{itemize}
\item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat
\item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat
@@ -185,7 +189,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \item $p$ prim, $k \in \mathbb{N}$:
$~\varphi(p^k) = p^k - p^{k - 1}$
- \item \textbf{Euler's Theorem:}
+ \item \textbf{\textsc{Euler}'s Theorem:}
Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$.
Falls $c$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}:
$a^{c} \equiv a \pmod{c}$
@@ -324,7 +328,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: \begin{methods}
\method{kthTerm}{Berechnet $k$-ten Term einer Rekurrenz $n$-ter Ordnung}{\log(k)\cdot n^2}
\end{methods}
- \sourcecode{math/linearRecurence.cpp}
+ \sourcecode{math/linearRecurrence.cpp}
Alternativ kann der \mbox{$k$-te} Term in \runtime{n^3\log(k)} berechnet werden:
$$\renewcommand\arraystretch{1.5}
\setlength\arraycolsep{3pt}
@@ -379,7 +383,7 @@ Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \text \subsection{Kombinatorik}
-\paragraph{Wilsons Theorem}
+\paragraph{\textsc{Wilson}'s Theorem}
A number $n$ is prime if and only if
$(n-1)!\equiv -1\bmod{n}$.\\
($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$)
@@ -391,14 +395,14 @@ $(n-1)!\equiv -1\bmod{n}$.\\ \end{cases}
\end{align*}
-\paragraph{\textsc{Zeckendorfs} Theorem}
+\paragraph{\textsc{Zeckendorf}'s Theorem}
Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer
verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei
aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\
\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch
hineinpasst.
-\paragraph{\textsc{Lucas}-Theorem}
+\paragraph{\textsc{Lucas}'s Theorem}
Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung),
so gilt
\vspace{-0.75\baselineskip}
@@ -409,7 +413,7 @@ so gilt %\begin{algorithm}{Binomialkoeffizienten}
\paragraph{Binomialkoeffizienten}
Die Anzahl der \mbox{$k$-elementigen} Teilmengen einer \mbox{$n$-elementigen} Menge.
-
+
\begin{methods}
\method{precalc}{berechnet $n!$ und $n!^{-1}$ vor}{\mathit{lim}}
\method{calc\_binom}{berechnet Binomialkoeffizient}{1}
diff --git a/math/shortModInv.cpp b/math/shortModInv.cpp index 244bacf..f696cce 100644 --- a/math/shortModInv.cpp +++ b/math/shortModInv.cpp @@ -1,3 +1,3 @@ ll multInv(ll x, ll m) { // x^{-1} mod m - return 1 < x ? m - inv(m % x, x) * m / x : 1; + return 1 < x ? m - multInv(m % x, x) * m / x : 1; } diff --git a/math/tables.tex b/math/tables.tex index 53f3758..c422d73 100644 --- a/math/tables.tex +++ b/math/tables.tex @@ -1,8 +1,12 @@ \enlargethispage{0.2cm} \begin{multicols*}{2} + \refstepcounter{subsection} + \subsectionmark{Tables} + \addcontentsline{toc}{subsection}{\protect\numberline{\thesubsection}Tables} + \input{math/tables/binom} \vfill - \input{math/tables/composite} + \input{math/tables/prime-composite} \vfill \input{math/tables/platonic} \vfill diff --git a/math/tables/composite.tex b/math/tables/prime-composite.tex index 8e14b2e..4c32c7d 100644 --- a/math/tables/composite.tex +++ b/math/tables/prime-composite.tex @@ -1,5 +1,4 @@ - -\begin{tabularx}{\linewidth}{|r||r|r||r|r|r||C|} +\begin{tabularx}{\linewidth}{|r|rIr|rIrIr|C|} \hline \multicolumn{7}{|c|}{Important Numbers} \\ \hline diff --git a/math/tables/stuff.tex b/math/tables/stuff.tex index 5b5093e..3cf8b4c 100644 --- a/math/tables/stuff.tex +++ b/math/tables/stuff.tex @@ -23,7 +23,7 @@ \#Wälder mit $k$ gewurzelten Bäumen mit vorgegebenen Wurzelknoten& $\frac{k}{n}n^{n-k}$ \\ - Dearangements & + Derangements & $!n = (n - 1)(!(n - 1) + !(n - 2)) = \left\lfloor\frac{n!}{e} + \frac{1}{2}\right\rfloor$ \\ & $\lim\limits_{n \to \infty} \frac{!n}{n!} = \frac{1}{e}$ \\ diff --git a/math/test/binomial0.cpp b/math/test/binomial0.cpp new file mode 100644 index 0000000..d6c3a03 --- /dev/null +++ b/math/test/binomial0.cpp @@ -0,0 +1,21 @@ +constexpr ll mod = 1'000'000'007; + +#include "../shortModInv.cpp" +#include "../binomial0.cpp" + +int main() { + precalc(); + assert(calc_binom(5, -1) == 0); + assert(calc_binom(5, 0) == 1); + assert(calc_binom(5, 1) == 5); + assert(calc_binom(5, 2) == 10); + 
assert(calc_binom(5, 3) == 10); + assert(calc_binom(5, 4) == 5); + assert(calc_binom(5, 5) == 1); + assert(calc_binom(5, 6) == 0); + assert(calc_binom(0, 0) == 1); + assert(calc_binom(-1, 0) == 0); + assert(calc_binom(-1, -1) == 0); + assert(calc_binom(-1, -2) == 0); + assert(calc_binom(100'000, 50'000) == 149033233); +} diff --git a/other/other.tex b/other/other.tex index 38434a5..1760b01 100644 --- a/other/other.tex +++ b/other/other.tex @@ -17,9 +17,9 @@ \begin{expandtable}
\begin{tabularx}{\linewidth}{|Ll|}
\hline
- Bit an Position j lesen & \code{(x & (1 << j)) != 0} \\
+ Bit an Position j lesen & \code{(x \& (1 << j)) != 0} \\
Bit an Position j setzen & \code{x |= (1 << j)} \\
- Bit an Position j löschen & \code{x &= ~(1 << j)} \\
+ Bit an Position j löschen & \code{x \&= ~(1 << j)} \\
Bit an Position j flippen & \code{x ^= (1 << j)} \\
Anzahl an führenden Nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\
Anzahl an schließenden Nullen ($x \neq 0$) & \code{__builtin_ctzll(x)} \\
@@ -36,9 +36,9 @@ \begin{expandtable}
\begin{tabularx}{\linewidth}{|lR|}
\hline
- Addition & \code{__builtin_saddll_overflow(a, b, &c)} \\
- Subtraktion & \code{__builtin_ssubll_overflow(a, b, &c)} \\
- Multiplikation & \code{__builtin_smulll_overflow(a, b, &c)} \\
+ Addition & \code{__builtin_saddll_overflow(a, b, \&c)} \\
+ Subtraktion & \code{__builtin_ssubll_overflow(a, b, \&c)} \\
+ Multiplikation & \code{__builtin_smulll_overflow(a, b, \&c)} \\
\hline
\end{tabularx}
\end{expandtable}
@@ -129,7 +129,7 @@ \item \textbf{System von Differenzbeschränkungen:}
Ändere alle Bedingungen in die Form $a-b \leq c$.
- Für jede Bedingung füge eine Kante \texttt{(b,a)} mit Gweicht \texttt{c} ein.
+ Für jede Bedingung füge eine Kante \texttt{(b,a)} mit Gewicht \texttt{c} ein.
Füge Quelle \texttt{s} hinzu, mit Kanten zu allen Knoten mit Gewicht 0.
Nutze \textsc{Bellman-Ford}, um die kürzesten Pfade von \texttt{s} aus zu finden.
\texttt{d[v]} ist mögliche Lösung für \texttt{v}.
diff --git a/string/string.tex b/string/string.tex index dbea36c..4b8a880 100644 --- a/string/string.tex +++ b/string/string.tex @@ -63,21 +63,21 @@ \end{algorithm} \clearpage -\begin{algorithm}{Lyndon und De-Bruijn} +\begin{algorithm}{\textsc{Lyndon} und \textsc{De-Bruijn}} \begin{itemize} - \item \textbf{Lyndon-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen. - \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von Lyndon-Worten zerlegt werden. - \item Für Lyndon-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein Lyndon-Wort ist. + \item \textbf{\textsc{Lyndon}-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen. + \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von \textsc{Lyndon}-Worten zerlegt werden. + \item Für \textsc{Lyndon}-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein \textsc{Lyndon}-Wort ist. \end{itemize} \begin{methods} - \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes Lyndon-Wort}{n} - \method{duval}{zerlegt $s$ in Lyndon-Worte}{n} + \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes \textsc{Lyndon}-Wort}{n} + \method{duval}{zerlegt $s$ in \textsc{Lyndon}-Worte}{n} \method{minrotation}{berechnet kleinste Rotation von $s$}{n} \end{methods} \sourcecode{string/lyndon.cpp} \sourcecode{string/duval.cpp} \begin{itemize} - \item \textbf{De-Bruijn-Sequenze $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird. + \item \textbf{\textsc{De-Bruijn}-Sequenz $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird. 
\item es gibt $\frac{(k!)^{k^{n-1}}}{k^{n}}$ verschiedene $B(\Sigma, n)$ \item $B(\Sigma, n)$ hat Länge $\abs{\Sigma}^n$ \end{itemize} diff --git a/tcr.pdf b/tcr.pdf Binary files differdeleted file mode 100644 index 5cc63a7..0000000 --- a/tcr.pdf +++ /dev/null @@ -3,12 +3,17 @@ \documentclass[a4paper,fontsize=7.8pt]{scrartcl} % General information. -\newcommand{\teamname}{Kindergarten Timelimit} +\newcommand{\teamname}{Infinite Loopers} \newcommand{\university}{Karlsruhe Institute of Technology} +% Source code links (optional) +\newcommand{\srclink}[1]{https://git.gloria-mundi.eu/tcr/plain/#1} + % Options \newif\ifoptional -%\optionaltrue +\ifdefined\OPTIONAL + \optionaltrue +\fi % Font encoding. \usepackage[T1]{fontenc} @@ -41,6 +46,7 @@ % Content. \begin{multicols*}{3} + \raggedcolumns \input{datastructures/datastructures} \input{graph/graph} \input{geometry/geometry} @@ -49,17 +55,12 @@ \clearpage \input{math/tables} \begin{multicols*}{3} + \raggedcolumns \input{string/string} \input{python/python} \input{other/other} \input{template/template} \clearpage - \ifodd\value{page} - \else - \null - \thispagestyle{empty} - \clearpage - \fi \input{tests/test} \end{multicols*} \end{document} diff --git a/template/console.cpp b/template/console.sh index 31885e9..31885e9 100644 --- a/template/console.cpp +++ b/template/console.sh diff --git a/template/template.tex b/template/template.tex index 3525ddf..bf82199 100644 --- a/template/template.tex +++ b/template/template.tex @@ -5,5 +5,5 @@ \end{algorithm} \begin{algorithm}{Console} - \sourcecode{template/console.cpp} + \sourcecode{template/console.sh} \end{algorithm} @@ -0,0 +1,33 @@ +#define _GLIBCXX_DEBUG 1 +#define _GLIBCXX_SANITIZE_VECTOR 1 +#include <bits/stdc++.h> +using namespace std; +using ll = long long; + +template<typename T> +T _lg_check(T n) { + assert(n > 0); + return __lg(n); +} + +#define __lg _lg_check + +namespace util { + +mt19937 rd(0); + +int randint(int l, int r) { + assert(l <= r); + return 
uniform_int_distribution<int>(l, r)(rd); +} + +int randint(int x) { + assert(x > 0); + return randint(0, x-1); +} + +int randint() { + return randint(-1'000'000, +1'000'000); +} + +} |
