Diffstat (limited to 'content')
-rw-r--r--  content/datastructures/LCT.cpp  178
-rw-r--r--  content/datastructures/bitset.cpp  7
-rw-r--r--  content/datastructures/datastructures.tex  121
-rw-r--r--  content/datastructures/dynamicConvexHull.cpp  36
-rw-r--r--  content/datastructures/fenwickTree.cpp  15
-rw-r--r--  content/datastructures/fenwickTree2.cpp  21
-rw-r--r--  content/datastructures/lazyPropagation.cpp  85
-rw-r--r--  content/datastructures/lichao.cpp  46
-rw-r--r--  content/datastructures/monotonicConvexHull.cpp  27
-rw-r--r--  content/datastructures/pbds.cpp  18
-rw-r--r--  content/datastructures/persistent.cpp  18
-rw-r--r--  content/datastructures/persistentArray.cpp  24
-rw-r--r--  content/datastructures/segmentTree.cpp  42
-rw-r--r--  content/datastructures/sparseTable.cpp  24
-rw-r--r--  content/datastructures/sparseTableDisjoint.cpp  27
-rw-r--r--  content/datastructures/stlHashMap.cpp  17
-rw-r--r--  content/datastructures/stlPriorityQueue.cpp  8
-rw-r--r--  content/datastructures/stlRope.cpp  8
-rw-r--r--  content/datastructures/stlTree.cpp  13
-rw-r--r--  content/datastructures/treap.cpp  79
-rw-r--r--  content/datastructures/treap2.cpp  79
-rw-r--r--  content/datastructures/unionFind.cpp  26
-rw-r--r--  content/datastructures/waveletTree.cpp  40
-rw-r--r--  content/geometry/antipodalPoints.cpp  12
-rw-r--r--  content/geometry/circle.cpp  33
-rw-r--r--  content/geometry/closestPair.cpp  27
-rw-r--r--  content/geometry/convexHull.cpp  18
-rw-r--r--  content/geometry/delaunay.cpp  124
-rw-r--r--  content/geometry/formulas.cpp  42
-rw-r--r--  content/geometry/formulas3d.cpp  53
-rw-r--r--  content/geometry/geometry.tex  62
-rw-r--r--  content/geometry/hpi.cpp  68
-rw-r--r--  content/geometry/lines.cpp  33
-rw-r--r--  content/geometry/linesAndSegments.cpp  89
-rw-r--r--  content/geometry/polygon.cpp  150
-rw-r--r--  content/geometry/segmentIntersection.cpp  63
-rw-r--r--  content/geometry/sortAround.cpp  11
-rw-r--r--  content/geometry/spheres.cpp  29
-rw-r--r--  content/geometry/triangle.cpp  43
-rw-r--r--  content/geometry/triangle.tex  41
-rw-r--r--  content/graph/2sat.cpp  31
-rw-r--r--  content/graph/LCA_sparse.cpp  32
-rw-r--r--  content/graph/TSP.cpp  29
-rw-r--r--  content/graph/articulationPoints.cpp  43
-rw-r--r--  content/graph/bellmannFord.cpp  19
-rw-r--r--  content/graph/bitonicTSP.cpp  31
-rw-r--r--  content/graph/bitonicTSPsimple.cpp  27
-rw-r--r--  content/graph/blossom.cpp  82
-rw-r--r--  content/graph/bronKerbosch.cpp  24
-rw-r--r--  content/graph/centroid.cpp  21
-rw-r--r--  content/graph/connect.cpp  31
-rw-r--r--  content/graph/cycleCounting.cpp  64
-rw-r--r--  content/graph/dfs.tex  16
-rw-r--r--  content/graph/dijkstra.cpp  21
-rw-r--r--  content/graph/dinicScaling.cpp  51
-rw-r--r--  content/graph/euler.cpp  23
-rw-r--r--  content/graph/floydWarshall.cpp  27
-rw-r--r--  content/graph/graph.tex  269
-rw-r--r--  content/graph/havelHakimi.cpp  18
-rw-r--r--  content/graph/hld.cpp  44
-rw-r--r--  content/graph/hopcroftKarp.cpp  47
-rw-r--r--  content/graph/kruskal.cpp  9
-rw-r--r--  content/graph/matching.cpp  23
-rw-r--r--  content/graph/maxCarBiMatch.cpp  25
-rw-r--r--  content/graph/maxWeightBipartiteMatching.cpp  50
-rw-r--r--  content/graph/minCostMaxFlow.cpp  66
-rw-r--r--  content/graph/pushRelabel.cpp  64
-rw-r--r--  content/graph/reroot.cpp  62
-rw-r--r--  content/graph/scc.cpp  32
-rw-r--r--  content/graph/stoerWagner.cpp  53
-rw-r--r--  content/graph/treeIsomorphism.cpp  15
-rw-r--r--  content/graph/virtualTree.cpp  22
-rw-r--r--  content/latexHeaders/code.sty  141
-rw-r--r--  content/latexHeaders/commands.sty  56
-rw-r--r--  content/latexHeaders/layout.sty  82
-rw-r--r--  content/latexHeaders/math.sty  98
-rw-r--r--  content/math/berlekampMassey.cpp  31
-rw-r--r--  content/math/bigint.cpp  271
-rw-r--r--  content/math/binomial0.cpp  14
-rw-r--r--  content/math/binomial1.cpp  8
-rw-r--r--  content/math/binomial2.cpp  32
-rw-r--r--  content/math/binomial3.cpp  10
-rw-r--r--  content/math/chineseRemainder.cpp  14
-rw-r--r--  content/math/cycleDetection.cpp  18
-rw-r--r--  content/math/discreteLogarithm.cpp  17
-rw-r--r--  content/math/discreteNthRoot.cpp  5
-rw-r--r--  content/math/divisors.cpp  11
-rw-r--r--  content/math/extendedEuclid.cpp  6
-rw-r--r--  content/math/gauss.cpp  36
-rw-r--r--  content/math/gcd-lcm.cpp  2
-rw-r--r--  content/math/goldenSectionSearch.cpp  15
-rw-r--r--  content/math/inversions.cpp  9
-rw-r--r--  content/math/inversionsMerge.cpp  27
-rw-r--r--  content/math/kthperm.cpp  14
-rw-r--r--  content/math/legendre.cpp  4
-rw-r--r--  content/math/lgsFp.cpp  26
-rw-r--r--  content/math/linearCongruence.cpp  5
-rw-r--r--  content/math/linearRecurence.cpp  33
-rw-r--r--  content/math/linearSieve.cpp  50
-rw-r--r--  content/math/longestIncreasingSubsequence.cpp  17
-rw-r--r--  content/math/math.tex  525
-rw-r--r--  content/math/matrixPower.cpp  14
-rw-r--r--  content/math/millerRabin.cpp  19
-rw-r--r--  content/math/modExp.cpp  6
-rw-r--r--  content/math/modMulIterativ.cpp  9
-rw-r--r--  content/math/modPowIterativ.cpp  9
-rw-r--r--  content/math/multInv.cpp  4
-rw-r--r--  content/math/permIndex.cpp  13
-rw-r--r--  content/math/piLegendre.cpp  23
-rw-r--r--  content/math/piLehmer.cpp  52
-rw-r--r--  content/math/polynomial.cpp  65
-rw-r--r--  content/math/primeSieve.cpp  16
-rw-r--r--  content/math/primitiveRoot.cpp  23
-rw-r--r--  content/math/rho.cpp  19
-rw-r--r--  content/math/shortModInv.cpp  3
-rw-r--r--  content/math/simpson.cpp  12
-rw-r--r--  content/math/sqrtModCipolla.cpp  14
-rw-r--r--  content/math/squfof.cpp  89
-rw-r--r--  content/math/tables.tex  18
-rw-r--r--  content/math/tables/binom.tex  28
-rw-r--r--  content/math/tables/composite.tex  27
-rw-r--r--  content/math/tables/nim.tex  96
-rw-r--r--  content/math/tables/numbers.tex  59
-rw-r--r--  content/math/tables/platonic.tex  39
-rw-r--r--  content/math/tables/probability.tex  27
-rw-r--r--  content/math/tables/series.tex  33
-rw-r--r--  content/math/tables/stuff.tex  32
-rw-r--r--  content/math/tables/twelvefold.tex  32
-rw-r--r--  content/math/transforms/andTransform.cpp  8
-rw-r--r--  content/math/transforms/bitwiseTransforms.cpp  12
-rw-r--r--  content/math/transforms/fft.cpp  23
-rw-r--r--  content/math/transforms/fftMul.cpp  15
-rw-r--r--  content/math/transforms/multiplyBitwise.cpp  8
-rw-r--r--  content/math/transforms/multiplyFFT.cpp  12
-rw-r--r--  content/math/transforms/multiplyNTT.cpp  8
-rw-r--r--  content/math/transforms/ntt.cpp  23
-rw-r--r--  content/math/transforms/orTransform.cpp  8
-rw-r--r--  content/math/transforms/seriesOperations.cpp  56
-rw-r--r--  content/math/transforms/xorTransform.cpp  10
-rw-r--r--  content/other/bitOps.cpp  18
-rw-r--r--  content/other/compiletime.cpp  7
-rw-r--r--  content/other/divideAndConquer.cpp  27
-rw-r--r--  content/other/fastIO.cpp  24
-rw-r--r--  content/other/josephus2.cpp  8
-rw-r--r--  content/other/josephusK.cpp  5
-rw-r--r--  content/other/knuth.cpp  15
-rw-r--r--  content/other/other.tex  312
-rw-r--r--  content/other/pbs.cpp  19
-rw-r--r--  content/other/pragmas.cpp  6
-rw-r--r--  content/other/sos.cpp  6
-rw-r--r--  content/other/split.cpp  10
-rw-r--r--  content/other/stress.sh  7
-rw-r--r--  content/other/stuff.cpp  29
-rw-r--r--  content/other/timed.cpp  3
-rw-r--r--  content/python/io.py  3
-rw-r--r--  content/python/python.tex  10
-rw-r--r--  content/python/recursion.py  2
-rw-r--r--  content/string/ahoCorasick.cpp  52
-rw-r--r--  content/string/deBruijn.cpp  7
-rw-r--r--  content/string/duval.cpp  21
-rw-r--r--  content/string/kmp.cpp  20
-rw-r--r--  content/string/longestCommonSubsequence.cpp  15
-rw-r--r--  content/string/lyndon.cpp  11
-rw-r--r--  content/string/manacher.cpp  20
-rw-r--r--  content/string/rollingHash.cpp  18
-rw-r--r--  content/string/rollingHashCf.cpp  17
-rw-r--r--  content/string/string.tex  132
-rw-r--r--  content/string/suffixArray.cpp  38
-rw-r--r--  content/string/suffixAutomaton.cpp  63
-rw-r--r--  content/string/suffixTree.cpp  72
-rw-r--r--  content/string/trie.cpp  35
-rw-r--r--  content/string/z.cpp  10
-rw-r--r--  content/tcr.tex  65
-rw-r--r--  content/template/console.sh  2
-rw-r--r--  content/template/template.cpp  17
-rw-r--r--  content/template/template.tex  9
-rw-r--r--  content/tests/gcc5bug.cpp  4
-rw-r--r--  content/tests/precision.cpp  8
-rw-r--r--  content/tests/test.tex  43
-rw-r--r--  content/tests/whitespace.cpp  1
180 files changed, 6960 insertions, 0 deletions
diff --git a/content/datastructures/LCT.cpp b/content/datastructures/LCT.cpp
new file mode 100644
index 0000000..c1dd278
--- /dev/null
+++ b/content/datastructures/LCT.cpp
@@ -0,0 +1,178 @@
+constexpr ll queryDefault = 0;
+constexpr ll updateDefault = 0;
+
+ll _modify(ll x, ll y) {
+ return x + y;
+}
+
+ll _query(ll x, ll y) {
+ return x + y;
+}
+
+ll _update(ll delta, int length) {
+ if (delta == updateDefault) return updateDefault;
+ //ll result = delta
+ //for (int i=1; i<length; i++) result = _query(result, delta);
+ return delta * length;
+}
+
+//generic:
+ll joinValueDelta(ll value, ll delta) {
+ if (delta == updateDefault) return value;
+ return _modify(value, delta);
+}
+
+ll joinDeltas(ll delta1, ll delta2) {
+ if (delta1 == updateDefault) return delta2;
+ if (delta2 == updateDefault) return delta1;
+ return _modify(delta1, delta2);
+}
+
+struct LCT {
+ struct Node {
+ ll nodeValue, subTreeValue, delta;
+ bool revert;
+ int id, size;
+ Node *left, *right, *parent;
+
+ Node(int id = 0, int val = queryDefault) :
+ nodeValue(val), subTreeValue(val), delta(updateDefault),
+ revert(false), id(id), size(1),
+ left(nullptr), right(nullptr), parent(nullptr) {}
+
+ bool isRoot() {
+ return !parent || (parent->left != this &&
+ parent->right != this);
+ }
+
+ void push() {
+ if (revert) {
+ revert = false;
+ swap(left, right);
+ if (left) left->revert ^= 1;
+ if (right) right->revert ^= 1;
+ }
+ nodeValue = joinValueDelta(nodeValue, delta);
+ subTreeValue = joinValueDelta(subTreeValue,
+ _update(delta, size));
+ if (left) left->delta = joinDeltas(left->delta, delta);
+ if (right) right->delta = joinDeltas(right->delta, delta);
+ delta = updateDefault;
+ }
+
+ ll getSubtreeValue() {
+ return joinValueDelta(subTreeValue, _update(delta, size));
+ }
+
+ void update() {
+ subTreeValue = joinValueDelta(nodeValue, delta);
+ size = 1;
+ if (left) {
+ subTreeValue = _query(subTreeValue,
+ left->getSubtreeValue());
+ size += left->size;
+ }
+ if (right) {
+ subTreeValue = _query(subTreeValue,
+ right->getSubtreeValue());
+ size += right->size;
+ }}
+ };
+
+ vector<Node> nodes;
+
+ LCT(int n) : nodes(n) {
+ for (int i = 0; i < n; i++) nodes[i].id = i;
+ }
+
+ void connect(Node* ch, Node* p, int isLeftChild) {
+ if (ch) ch->parent = p;
+ if (isLeftChild >= 0) {
+ if (isLeftChild) p->left = ch;
+ else p->right = ch;
+ }}
+
+ void rotate(Node* x) {
+ Node* p = x->parent;
+ Node* g = p->parent;
+ bool isRootP = p->isRoot();
+ bool leftChildX = (x == p->left);
+
+ connect(leftChildX ? x->right : x->left, p, leftChildX);
+ connect(p, x, !leftChildX);
+ connect(x, g, isRootP ? -1 : p == g->left);
+ p->update();
+ }
+
+ void splay(Node* x) {
+ while (!x->isRoot()) {
+ Node* p = x->parent;
+ Node* g = p->parent;
+ if (!p->isRoot()) g->push();
+ p->push();
+ x->push();
+ if (!p->isRoot()) rotate((x == p->left) ==
+ (p == g->left) ? p : x);
+ rotate(x);
+ }
+ x->push();
+ x->update();
+ }
+
+ Node* expose(Node* x) {
+ Node* last = nullptr;
+ for (Node* y = x; y; y = y->parent) {
+ splay(y);
+ y->left = last;
+ last = y;
+ }
+ splay(x);
+ return last;
+ }
+
+ void makeRoot(Node* x) {
+ expose(x);
+ x->revert ^= 1;
+ }
+
+ bool connected(Node* x, Node* y) {
+ if (x == y) return true;
+ expose(x);
+ expose(y);
+ return x->parent;
+ }
+
+ void link(Node* x, Node* y) {
+ assert(!connected(x, y)); // not yet connected!
+ makeRoot(x);
+ x->parent = y;
+ }
+
+ void cut(Node* x, Node* y) {
+ makeRoot(x);
+ expose(y);
+ //must be a tree edge!
+ assert(!(y->right != x || x->left != nullptr));
+ y->right->parent = nullptr;
+ y->right = nullptr;
+ }
+
+ Node* lca(Node* x, Node* y) {
+ assert(connected(x, y));
+ expose(x);
+ return expose(y);
+ }
+
+ ll query(Node* from, Node* to) {
+ makeRoot(from);
+ expose(to);
+ if (to) return to->getSubtreeValue();
+ return queryDefault;
+ }
+
+ void modify(Node* from, Node* to, ll delta) {
+ makeRoot(from);
+ expose(to);
+ to->delta = joinDeltas(to->delta, delta);
+ }
+};
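A minimal usage sketch for the structure above (node values start at queryDefault = 0, the node array is public):
LCT lct(5);
LCT::Node *a = &lct.nodes[0], *b = &lct.nodes[1], *c = &lct.nodes[2];
lct.link(a, b);
lct.link(b, c);
lct.modify(c, c, 7);              // add 7 on the path c..c, i.e. node 2 only
ll pathSum = lct.query(a, c);     // sum over the path 0-1-2 = 7
bool conn = lct.connected(a, c);  // true
lct.cut(b, c);                    // {1,2} is a tree edge, so this is allowed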
diff --git a/content/datastructures/bitset.cpp b/content/datastructures/bitset.cpp
new file mode 100644
index 0000000..d19abb0
--- /dev/null
+++ b/content/datastructures/bitset.cpp
@@ -0,0 +1,7 @@
+bitset<10> bits(0b000010100);
+bits._Find_first(); //2
+bits._Find_next(2); //4
+bits._Find_next(4); //10, i.e. N (no further bit set)
+bits[x] = 1; //not bits.set(x) or bits.reset(x)!
+bits[x].flip(); //not bits.flip(x)!
+bits.count(); //number of set bits
diff --git a/content/datastructures/datastructures.tex b/content/datastructures/datastructures.tex
new file mode 100644
index 0000000..40132a9
--- /dev/null
+++ b/content/datastructures/datastructures.tex
@@ -0,0 +1,121 @@
+\section{Data Structures}
+
+\begin{algorithm}{Segment Tree}
+ \begin{methods}
+ \method{SegTree}{builds the tree}{n}
+ \method{query}{computes the sum over $[l, r)$}{\log(n)}
+ \method{update}{changes a single value}{\log(n)}
+ \end{methods}
+ \sourcecode{datastructures/segmentTree.cpp}
+
+ \subsubsection{Lazy Propagation}
+ Assignment modifications, sum queries \\
+ \method{lower\_bound}{first index in $[l, r)$ with value $\geq$ x (requires max-combine)}{\log(n)}
+ \sourcecode{datastructures/lazyPropagation.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Wavelet Tree}
+ \begin{methods}
+ \method{WaveletTree}{builds the tree}{n\*\log(\Sigma)}
+ \method{kth}{$k$-th smallest element in $[l, r)$}{\log(\Sigma)}
+ \method{countSmaller}{number of elements in $[l, r)$ smaller than $k$}{\log(\Sigma)}
+ \end{methods}
+ \sourcecode{datastructures/waveletTree.cpp}
+\end{algorithm}
+\columnbreak
+
+\begin{algorithm}{Fenwick Tree}
+ \begin{methods}
+ \method{init}{builds the tree}{n\*\log(n)}
+ \method{prefix\_sum}{sum over $[0, i]$}{\log(n)}
+ \method{update}{adds a delta to one element}{\log(n)}
+ \end{methods}
+ \sourcecode{datastructures/fenwickTree.cpp}
+
+ \begin{methods}
+ \method{init}{builds the tree}{n\*\log(n)}
+ \method{prefix\_sum}{sum over $[0, i]$}{\log(n)}
+ \method{update}{adds a delta to all elements in $[l, r)$. $l\leq r$!}{\log(n)}
+ \end{methods}
+ \sourcecode{datastructures/fenwickTree2.cpp}
+\end{algorithm}
+
+\begin{algorithm}{STL-Rope (Implicit Cartesian Tree)}
+ \sourcecode{datastructures/stlRope.cpp}
+\end{algorithm}
+\columnbreak
+
+\begin{algorithm}{(Implicit) Treap (Cartesian Tree)}
+ \begin{methods}
+ \method{insert}{inserts value $\mathit{val}$ at position $i$ (shifts all positions $\geq i$)}{\log(n)}
+ \method{remove}{removes the values at positions $[i,i+\mathit{count})$}{\log(n)}
+ \end{methods}
+ \sourcecode{datastructures/treap2.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Range Minimum Query}
+ \begin{methods}
+ \method{init}{builds the structure}{n\*\log(n)}
+ \method{queryIdempotent}{index of the minimum in $[l, r)$. $l<r$!}{1}
+ \end{methods}
+ \begin{itemize}
+ \item the \code{better} function must be idempotent!
+ \end{itemize}
+ \sourcecode{datastructures/sparseTable.cpp}
+\end{algorithm}
+
+\begin{algorithm}{STL-Bitset}
+ \sourcecode{datastructures/bitset.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Link-Cut-Tree}
+ \begin{methods}
+ \method{LCT}{builds the forest}{n}
+ \method{connected}{checks whether two nodes are in the same tree}{\log(n)}
+ \method{link}{inserts the edge $\{x,y\}$}{\log(n)}
+ \method{cut}{removes the edge $\{x,y\}$}{\log(n)}
+ \method{lca}{computes the LCA of $x$ and $y$}{\log(n)}
+ \method{query}{evaluates \code{query} over the nodes of the $xy$-path}{\log(n)}
+ \method{modify}{adds a delta to every value on the $xy$-path}{\log(n)}
+ \end{methods}
+ \sourcecode{datastructures/LCT.cpp}
+\end{algorithm}
+\clearpage
+
+\begin{algorithm}{Lichao}
+ \sourcecode{datastructures/lichao.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Policy Based Data Structures}
+ \textbf{Important:} use \code{p.swap(p2)} instead of \code{swap(p, p2)}!
+ \sourcecode{datastructures/stlPriorityQueue.cpp}
+ \columnbreak
+ \sourcecode{datastructures/pbds.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Lower/Upper Envelope (Convex Hull Optimization)}
+ To turn a lower envelope into an upper envelope (or vice versa), simply negate $m$ and $b$ when inserting the lines.
+ \sourcecode{datastructures/monotonicConvexHull.cpp}
+ \sourcecode{datastructures/dynamicConvexHull.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Union-Find}
+ \begin{methods}
+ \method{init}{creates $n$ singleton sets}{n}
+ \method{findSet}{finds the representative}{\log(n)}
+ \method{unionSets}{merges two sets}{\log(n)}
+ \method{m\*findSet + n\*unionSets}{sequence of operations}{n+m\*\alpha(n)}
+ \end{methods}
+ \sourcecode{datastructures/unionFind.cpp}
+\end{algorithm}
+\columnbreak
+
+\begin{algorithm}{Persistent}
+ \begin{methods}
+ \method{get}{returns the value at time $t$}{\log(t)}
+ \method{set}{changes the value at time $t$}{\log(t)}
+ \method{reset}{resets the data structure to time $t$}{1}
+ \end{methods}
+ \sourcecode{datastructures/persistent.cpp}
+ \sourcecode{datastructures/persistentArray.cpp}
+\end{algorithm}
diff --git a/content/datastructures/dynamicConvexHull.cpp b/content/datastructures/dynamicConvexHull.cpp
new file mode 100644
index 0000000..d669847
--- /dev/null
+++ b/content/datastructures/dynamicConvexHull.cpp
@@ -0,0 +1,36 @@
+struct Line {
+ mutable ll m, b, p;
+ bool operator<(const Line& o) const {return m < o.m;}
+ bool operator<(ll x) const {return p < x;}
+};
+
+struct HullDynamic : multiset<Line, less<>> {
+ // (for doubles, use inf = 1/.0, div(a,b) = a/b)
+ ll div(ll a, ll b) {return a / b - ((a ^ b) < 0 && a % b);}
+
+ bool isect(iterator x, iterator y) {
+ if (y == end()) {x->p = INF; return false;}
+ if (x->m == y->m) x->p = x->b > y->b ? INF : -INF;
+ else x->p = div(y->b - x->b, x->m - y->m);
+ return x->p >= y->p;
+ }
+
+ void add(ll m, ll b) {
+ auto x = insert({m, b, 0});
+ while (isect(x, next(x))) erase(next(x));
+ if (x != begin()) {
+ x--;
+ if (isect(x, next(x))) {
+ erase(next(x));
+ isect(x, next(x));
+ }}
+ while (x != begin() && prev(x)->p >= x->p) {
+ x--;
+ isect(x, erase(next(x)));
+ }}
+
+ ll query(ll x) {
+ auto l = *lower_bound(x);
+ return l.m * x + l.b;
+ }
+};
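A small usage sketch. This appears to be KACTL's LineContainer, i.e. it keeps the upper envelope and query(x) should return the maximum over all inserted lines; per the note in datastructures.tex, negate m and b on insertion (and negate the result) to get the minimum instead.
HullDynamic hull;
hull.add(1, 0);            // y = x
hull.add(-1, 4);           // y = -x + 4
ll best = hull.query(1);   // max(1*1+0, -1*1+4) = 3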
diff --git a/content/datastructures/fenwickTree.cpp b/content/datastructures/fenwickTree.cpp
new file mode 100644
index 0000000..eb5cd73
--- /dev/null
+++ b/content/datastructures/fenwickTree.cpp
@@ -0,0 +1,15 @@
+vector<ll> tree;
+
+void update(int i, ll val) {
+ for (i++; i < sz(tree); i += i & -i) tree[i] += val;
+}
+
+void init(int n) {
+ tree.assign(n + 1, 0);
+}
+
+ll prefix_sum(int i) {
+ ll sum = 0;
+ for (i++; i > 0; i -= i & -i) sum += tree[i];
+ return sum;
+}
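Usage sketch: indices are 0-based, prefix_sum(i) is inclusive, and a range sum over [l, r] is a difference of two prefix sums.
init(8);
update(3, 5);                             // a[3] += 5
update(6, 2);                             // a[6] += 2
ll total = prefix_sum(7);                 // 7
ll range = prefix_sum(6) - prefix_sum(3); // sum of a[4..6] = 2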
diff --git a/content/datastructures/fenwickTree2.cpp b/content/datastructures/fenwickTree2.cpp
new file mode 100644
index 0000000..9384e3c
--- /dev/null
+++ b/content/datastructures/fenwickTree2.cpp
@@ -0,0 +1,21 @@
+vector<ll> add, mul;
+
+void update(int l, int r, ll val) {
+ for (int tl = l + 1; tl < sz(add); tl += tl & -tl)
+ add[tl] += val, mul[tl] -= val * l;
+ for (int tr = r + 1; tr < sz(add); tr += tr & -tr)
+ add[tr] -= val, mul[tr] += val * r;
+}
+
+void init(vector<ll>& v) {
+ mul.assign(sz(v) + 1, 0);
+ add.assign(sz(v) + 1, 0);
+ for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]);
+}
+
+ll prefix_sum(int i) {
+ ll res = 0; i++;
+ for (int ti = i; ti > 0; ti -= ti & -ti)
+ res += add[ti] * i + mul[ti];
+ return res;
+}
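Usage sketch for the range-update variant: update adds a delta to the half-open range [l, r), prefix_sum(i) stays inclusive.
vector<ll> v = {1, 2, 3, 4};
init(v);
update(1, 3, 10);        // v[1] += 10, v[2] += 10
ll s = prefix_sum(2);    // 1 + 12 + 13 = 26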
diff --git a/content/datastructures/lazyPropagation.cpp b/content/datastructures/lazyPropagation.cpp
new file mode 100644
index 0000000..441590e
--- /dev/null
+++ b/content/datastructures/lazyPropagation.cpp
@@ -0,0 +1,85 @@
+struct SegTree {
+ using T = ll; using U = ll;
+ int n;
+ static constexpr T E = 0; // Neutral element for combine
+ static constexpr U UF = inf; // Value never used by updates
+ vector<T> tree;
+ int h;
+ vector<U> lazy;
+ vector<int> k; // size of segments (optional)
+
+ SegTree(const vector<T>& a) : n(sz(a) + 1), tree(2 * n, E),
+ //SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def),
+ h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) {
+ copy(all(a), tree.begin() + n);
+ for (int i = n - 1; i > 0; i--) {
+ k[i] = 2 * k[2 * i];
+ tree[i] = comb(tree[2 * i], tree[2 * i + 1]);
+ }}
+
+ T comb(T a, T b) {return a + b;} // Modify this + E
+
+ void apply(int i, U val) { // And this + UF
+ tree[i] = val * k[i];
+ if (i < n) lazy[i] = val; // Don't forget this
+ }
+
+ void push_down(int i) {
+ if (lazy[i] != UF) {
+ apply(2 * i, lazy[i]);
+ apply(2 * i + 1, lazy[i]);
+ lazy[i] = UF;
+ }}
+
+ void push(int i) {
+ for (int s = h; s > 0; s--) push_down(i >> s);
+ }
+
+ void build(int i) {
+ while (i /= 2) {
+ push_down(i);
+ tree[i] = comb(tree[2 * i], tree[2 * i + 1]);
+ }}
+
+ void update(int l, int r, U val) {
+ l += n, r += n;
+ int l0 = l, r0 = r;
+ push(l0), push(r0 - 1);
+ for (; l < r; l /= 2, r /= 2) {
+ if (l&1) apply(l++, val);
+ if (r&1) apply(--r, val);
+ }
+ build(l0), build(r0 - 1);
+ }
+
+ T query(int l, int r) {
+ l += n, r += n;
+ push(l), push(r - 1);
+ T resL = E, resR = E;
+ for (; l < r; l /= 2, r /= 2) {
+ if (l&1) resL = comb(resL, tree[l++]);
+ if (r&1) resR = comb(tree[--r], resR);
+ }
+ return comb(resL, resR);
+ }
+
+ // Optional:
+ int lower_bound(int l, int r, T x) {
+ l += n, r += n;
+ push(l), push(r - 1);
+ int a[64] = {}, lp = 0, rp = 64;
+ for (; l < r; l /= 2, r /= 2) {
+ if (l&1) a[lp++] = l++;
+ if (r&1) a[--rp] = --r;
+ }
+ for (int i : a) if (i != 0 && tree[i] >= x) { // Modify this
+ while (i < n) {
+ push_down(i);
+ if (tree[2 * i] >= x) i = 2 * i; // And this
+ else i = 2 * i + 1;
+ }
+ return i - n;
+ }
+ return -1;
+ }
+};
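Usage sketch with the configuration above (assignment updates, sum queries, half-open ranges):
vector<ll> a = {1, 2, 3, 4, 5};
SegTree st(a);
st.update(1, 4, 7);       // assign 7 to indices 1..3
ll s = st.query(0, 5);    // 1 + 7 + 7 + 7 + 5 = 27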
diff --git a/content/datastructures/lichao.cpp b/content/datastructures/lichao.cpp
new file mode 100644
index 0000000..f66778e
--- /dev/null
+++ b/content/datastructures/lichao.cpp
@@ -0,0 +1,46 @@
+vector<ll> xs; // IMPORTANT: Initialize before constructing!
+int findX(int i) {return lower_bound(all(xs), i) - begin(xs);}
+
+struct Fun { // Default: Linear function. Change as needed.
+ ll m, c;
+ ll operator()(int x) {return m*xs[x] + c;}
+};
+
+// Default: Computes min. Change lines with comment for max.
+struct Lichao {
+ static constexpr Fun id = {0, inf}; // {0, -inf}
+ int n, cap;
+ vector<Fun> seg;
+ Lichao() : n(sz(xs)), cap(2<<__lg(n)), seg(2*cap, id) {}
+
+ void _insert(Fun f, int l, int r, int i) {
+ while (i < 2*cap){
+ int m = (l+r)/2;
+ if (m >= n) {r = m; i = 2*i; continue;}
+ Fun &g = seg[i];
+ if (f(m) < g(m)) swap(f, g); // >
+ if (f(l) < g(l)) r = m, i = 2*i; // >
+ else l = m, i = 2*i+1;
+ }}
+ void insert(Fun f) {_insert(f, 0, cap, 1);}
+
+ void _segmentInsert(Fun f, int l, int r, int a, int b, int i) {
+ if (l <= a && b <= r) _insert(f, a, b, i);
+ else if (a < r && l < b){
+ int m = (a+b)/2;
+ _segmentInsert(f, l, r, a, m, 2*i);
+ _segmentInsert(f, l, r, m, b, 2*i+1);
+ }}
+ void segmentInsert(Fun f, ll l, ll r) {
+ _segmentInsert(f, findX(l), findX(r), 0, cap, 1);
+ }
+
+ ll _query(int x) {
+ ll ans = inf; // -inf
+ for (int i = x + cap; i > 0; i /= 2) {
+ ans = min(ans, seg[i](x)); // max
+ }
+ return ans;
+ }
+ ll query(ll x) {return _query(findX(x));}
+};
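Usage sketch: xs must contain every x-coordinate that will ever be queried before the tree is constructed; in the default configuration query returns the minimum.
xs = {0, 1, 2, 5};
Lichao lc;
lc.insert({2, 1});        // y = 2x + 1
lc.insert({-1, 10});      // y = -x + 10
ll best = lc.query(5);    // min(11, 5) = 5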
diff --git a/content/datastructures/monotonicConvexHull.cpp b/content/datastructures/monotonicConvexHull.cpp
new file mode 100644
index 0000000..44bff83
--- /dev/null
+++ b/content/datastructures/monotonicConvexHull.cpp
@@ -0,0 +1,27 @@
+// Lower envelope with MONOTONIC inserts and queries. Every new
+// line must have a smaller slope than all previous ones.
+struct Line {
+ ll m, b;
+ ll operator()(ll x) {return m*x+b;}
+};
+
+vector<Line> ls;
+int ptr = 0;
+
+bool bad(Line l1, Line l2, Line l3) {
+ return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m);
+}
+
+void add(ll m, ll b) { // O(1) amortized
+ while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, b})) {
+ ls.pop_back();
+ }
+ ls.push_back({m, b});
+ ptr = min(ptr, sz(ls) - 1);
+}
+
+ll query(ll x) { // O(1) amortized
+ ptr = min(ptr, sz(ls) - 1);
+ while (ptr < sz(ls)-1 && ls[ptr + 1](x) < ls[ptr](x)) ptr++;
+ return ls[ptr](x);
+}
\ No newline at end of file
diff --git a/content/datastructures/pbds.cpp b/content/datastructures/pbds.cpp
new file mode 100644
index 0000000..f0889a2
--- /dev/null
+++ b/content/datastructures/pbds.cpp
@@ -0,0 +1,18 @@
+#include <ext/pb_ds/assoc_container.hpp>
+using namespace __gnu_pbds;
+template<typename T>
+using Tree = tree<T, null_type, less<T>, rb_tree_tag,
+ tree_order_statistics_node_update>;
+// T.order_of_key(x): number of elements strictly less than x
+// *T.find_by_order(k): k-th element
+
+template<typename T>
+struct chash {
+ static const uint64_t C = ll(2e18 * acos(-1)) | 199; // random odd
+ size_t operator()(T o) const {
+ return __builtin_bswap64(hash<T>()(o) * C);
+}};
+template<typename K, typename V>
+using hashMap = gp_hash_table<K, V, chash<K>>;
+template<typename T>
+using hashSet = gp_hash_table<T, null_type, chash<T>>;
diff --git a/content/datastructures/persistent.cpp b/content/datastructures/persistent.cpp
new file mode 100644
index 0000000..4093cdc
--- /dev/null
+++ b/content/datastructures/persistent.cpp
@@ -0,0 +1,18 @@
+template<typename T>
+struct persistent {
+ int& time;
+ vector<pair<int, T>> data;
+
+ persistent(int& time, T value = {})
+ : time(time), data(1, {time, value}) {}
+
+ T get(int t) {
+ return prev(upper_bound(all(data), pair{t+1, T{}}))->second;
+ }
+
+ int set(T value) {
+ time += 2;
+ data.push_back({time, value});
+ return time;
+ }
+};
diff --git a/content/datastructures/persistentArray.cpp b/content/datastructures/persistentArray.cpp
new file mode 100644
index 0000000..60d8b17
--- /dev/null
+++ b/content/datastructures/persistentArray.cpp
@@ -0,0 +1,24 @@
+template<typename T>
+struct persistentArray {
+ int time;
+ vector<persistent<T>> data;
+ vector<pair<int, int>> mods;
+
+ persistentArray(int n, T value = {})
+ : time(0), data(n, {time, value}) {}
+
+ T get(int p, int t) {return data[p].get(t);}
+
+ int set(int p, T value) {
+ mods.push_back({p, time});
+ return data[p].set(value);
+ }
+
+ void reset(int t) {
+ while (!mods.empty() && mods.back().second > t) {
+ data[mods.back().first].data.pop_back();
+ mods.pop_back();
+ }
+ time = t;
+ }
+};
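Usage sketch for the two snippets above: every set() advances the shared timestamp by 2 and returns the new time, get(p, t) reads position p as it was at time t.
persistentArray<int> arr(3);   // three cells, initially 0
int t1 = arr.set(0, 5);        // t1 = 2
int t2 = arr.set(0, 9);        // t2 = 4
int v0 = arr.get(0, 0);        // 0
int v1 = arr.get(0, t1);       // 5
int v2 = arr.get(0, t2);       // 9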
diff --git a/content/datastructures/segmentTree.cpp b/content/datastructures/segmentTree.cpp
new file mode 100644
index 0000000..6b69d0b
--- /dev/null
+++ b/content/datastructures/segmentTree.cpp
@@ -0,0 +1,42 @@
+struct SegTree {
+ using T = ll;
+ int n;
+ vector<T> tree;
+ static constexpr T E = 0; // Neutral element for combine
+
+ SegTree(vector<T>& a) : n(sz(a)), tree(2 * n) {
+ //SegTree(int size, T val = E) : n(size), tree(2 * n, val) {
+ copy(all(a), tree.begin() + n);
+ for (int i = n - 1; i > 0; i--) { // remove for range update
+ tree[i] = comb(tree[2 * i], tree[2 * i + 1]);
+ }}
+
+ T comb(T a, T b) {return a + b;} // modify this + neutral
+
+ void update(int i, T val) {
+ tree[i += n] = val; // apply update code
+ while (i /= 2) tree[i] = comb(tree[2 * i], tree[2 * i + 1]);
+ }
+
+ T query(int l, int r) {
+ T resL = E, resR = E;
+ for (l += n, r += n; l < r; l /= 2, r /= 2) {
+ if (l&1) resL = comb(resL, tree[l++]);
+ if (r&1) resR = comb(tree[--r], resR);
+ }
+ return comb(resL, resR);
+ }
+
+ // OR: range update + point query, needs commutative comb
+ void modify(int l, int r, T val) {
+ for (l += n, r += n; l < r; l /= 2, r /= 2) {
+ if (l&1) tree[l] = comb(tree[l], val), l++;
+ if (r&1) --r, tree[r] = comb(tree[r], val);
+ }}
+
+ T query(int i) {
+ T res = E;
+ for (i += n; i > 0; i /= 2) res = comb(res, tree[i]);
+ return res;
+ }
+};
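Usage sketch for the point-update / range-query configuration (comb = sum, half-open ranges):
vector<ll> a = {5, 1, 4, 2};
SegTree st(a);
ll s1 = st.query(1, 3);   // 1 + 4 = 5
st.update(2, 10);         // a[2] = 10
ll s2 = st.query(0, 4);   // 5 + 1 + 10 + 2 = 18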
diff --git a/content/datastructures/sparseTable.cpp b/content/datastructures/sparseTable.cpp
new file mode 100644
index 0000000..b3f946e
--- /dev/null
+++ b/content/datastructures/sparseTable.cpp
@@ -0,0 +1,24 @@
+struct SparseTable {
+ vector<vector<int>> st;
+ ll *a;
+
+ int better(int lidx, int ridx) {
+ return a[lidx] <= a[ridx] ? lidx : ridx;
+ }
+
+ void init(vector<ll>* vec) {
+ int n = sz(*vec);
+ a = vec->data();
+ st.assign(__lg(n) + 1, vector<int>(n));
+ iota(all(st[0]), 0);
+ for (int j = 0; (2 << j) <= n; j++) {
+ for (int i = 0; i + (2 << j) <= n; i++) {
+ st[j + 1][i] = better(st[j][i] , st[j][i + (1 << j)]);
+ }}}
+
+ int queryIdempotent(int l, int r) {
+ if (r <= l) return -1;
+ int j = __lg(r - l); //31 - builtin_clz(r - l);
+ return better(st[j][l] , st[j][r - (1 << j)]);
+ }
+};
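Usage sketch: the table stores indices into the passed vector, so the vector must stay alive and unmodified after init.
vector<ll> a = {4, 2, 7, 2, 5};
SparseTable st;
st.init(&a);
int idx = st.queryIdempotent(1, 4);  // index of the minimum of a[1..3] -> 1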
diff --git a/content/datastructures/sparseTableDisjoint.cpp b/content/datastructures/sparseTableDisjoint.cpp
new file mode 100644
index 0000000..55165d4
--- /dev/null
+++ b/content/datastructures/sparseTableDisjoint.cpp
@@ -0,0 +1,27 @@
+struct DisjointST {
+ static constexpr ll neutral = 0;
+ vector<vector<ll>> dst;
+ ll* a;
+
+ ll combine(const ll& x, const ll& y) {
+ return x + y;
+ }
+
+ void init(vector<ll>* vec) {
+ int n = sz(*vec);
+ a = vec->data();
+ dst.assign(__lg(n) + 1, vector<ll>(n + 1, neutral));
+ for (int h = 0, l = 1; l <= n; h++, l *= 2) {
+ for (int c = l; c < n + l; c += 2 * l) {
+ for (int i = c; i < min(n, c + l); i++)
+ dst[h][i + 1] = combine(dst[h][i], vec->at(i));
+ for (int i = min(n, c); i > c - l; i--)
+ dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]);
+ }}}
+
+ ll query(int l, int r) {
+ if (r <= l) return neutral;
+ int h = __lg(l ^ r);
+ return combine(dst[h][l], dst[h][r]);
+ }
+};
diff --git a/content/datastructures/stlHashMap.cpp b/content/datastructures/stlHashMap.cpp
new file mode 100644
index 0000000..b107dde
--- /dev/null
+++ b/content/datastructures/stlHashMap.cpp
@@ -0,0 +1,17 @@
+#include <ext/pb_ds/assoc_container.hpp>
+using namespace __gnu_pbds;
+
+template<typename T>
+struct betterHash {
+ size_t operator()(T o) const {
+ size_t h = hash<T>()(o) ^ 42394245; //random value
+ h = ((h >> 16) ^ h) * 0x45d9f3b;
+ h = ((h >> 16) ^ h) * 0x45d9f3b;
+ h = ((h >> 16) ^ h);
+ return h;
+}};
+
+template<typename K, typename V, typename H = betterHash<K>>
+using hashMap = gp_hash_table<K, V, H>;
+template<typename K, typename H = betterHash<K>>
+using hashSet = gp_hash_table<K, null_type, H>;
diff --git a/content/datastructures/stlPriorityQueue.cpp b/content/datastructures/stlPriorityQueue.cpp
new file mode 100644
index 0000000..32b2455
--- /dev/null
+++ b/content/datastructures/stlPriorityQueue.cpp
@@ -0,0 +1,8 @@
+#include <ext/pb_ds/priority_queue.hpp>
+template<typename T>
+using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>>
+
+auto it = pq.push(5);
+pq.modify(it, 6);
+pq.join(pq2);
+// push, join are O(1), pop, modify, erase O(log n) amortized
diff --git a/content/datastructures/stlRope.cpp b/content/datastructures/stlRope.cpp
new file mode 100644
index 0000000..804cd67
--- /dev/null
+++ b/content/datastructures/stlRope.cpp
@@ -0,0 +1,8 @@
+#include <ext/rope>
+using namespace __gnu_cxx;
+rope<int> v; // Like a normal container.
+v.push_back(num); // O(log(n))
+rope<int> sub = v.substr(start, length); // O(log(n))
+v.erase(start, length); // O(log(n))
+v.insert(v.mutable_begin() + offset, sub); // O(log(n))
+for(auto it = v.mutable_begin(); it != v.mutable_end(); it++)
diff --git a/content/datastructures/stlTree.cpp b/content/datastructures/stlTree.cpp
new file mode 100644
index 0000000..fbb68b9
--- /dev/null
+++ b/content/datastructures/stlTree.cpp
@@ -0,0 +1,13 @@
+#include <ext/pb_ds/assoc_container.hpp>
+#include <ext/pb_ds/tree_policy.hpp>
+using namespace std; using namespace __gnu_pbds;
+template<typename T>
+using Tree = tree<T, null_type, less<T>, rb_tree_tag,
+ tree_order_statistics_node_update>;
+
+int main() {
+ Tree<int> X;
+ for (int i : {1, 2, 4, 8, 16}) X.insert(i);
+ *X.find_by_order(3); // => 8
+ X.order_of_key(10); // => 4 = min i with X[i] >= 10
+}
diff --git a/content/datastructures/treap.cpp b/content/datastructures/treap.cpp
new file mode 100644
index 0000000..c96e36a
--- /dev/null
+++ b/content/datastructures/treap.cpp
@@ -0,0 +1,79 @@
+struct node {
+ int key, prio, left, right, size;
+ node(int key, int prio) : key(key), prio(prio), left(-1),
+ right(-1), size(1) {};
+};
+
+vector<node> treap;
+
+int getSize(int root) {
+ return root < 0 ? 0 : treap[root].size;
+}
+
+void update(int root) {
+ if (root < 0) return;
+ treap[root].size = 1 + getSize(treap[root].left)
+ + getSize(treap[root].right);
+}
+
+pair<int, int> split(int root, int minKeyRight) {
+ if (root < 0) return {-1, -1};
+ if (treap[root].key >= minKeyRight) {
+ auto leftSplit = split(treap[root].left, minKeyRight);
+ treap[root].left = leftSplit.second;
+ update(root);
+ leftSplit.second = root;
+ return leftSplit;
+ } else {
+ auto rightSplit = split(treap[root].right, minKeyRight);
+ treap[root].right = rightSplit.first;
+ update(root);
+ rightSplit.first = root;
+ return rightSplit;
+}}
+
+int merge (int left, int right) {
+ if (left < 0) return right;
+ if (right < 0) return left;
+ if (treap[left].prio < treap[right].prio) { //min priority heap
+ treap[left].right = merge(treap[left].right, right);
+ update(left);
+ return left;
+ } else {
+ treap[right].left = merge(left, treap[right].left);
+ update(right);
+ return right;
+}}
+
+//insert values with high priority first
+int insert(int root, int key, int prio) {
+ int next = sz(treap);
+ treap.emplace_back(key, prio);
+ auto t = split(root, key);
+ //returns new root
+ return merge(merge(t.first, next), t.second);
+}
+
+int remove(int root, int key) {
+ if (root < 0) return -1;
+ if (key < treap[root].key) {
+ treap[root].left = remove(treap[root].left, key);
+ update(root);
+ return root;
+ } else if (key > treap[root].key) {
+ treap[root].right = remove(treap[root].right, key);
+ update(root);
+ return root;
+ } else { //check prio?
+ return merge(treap[root].left, treap[root].right);
+}}
+
+int kth(int root, int k) {
+ if (root < 0) return -1;
+ int leftSize = getSize(treap[root].left);
+ if (k < leftSize) return kth(treap[root].left, k);
+ else if (k > leftSize) {
+ return kth(treap[root].right, k - 1 - leftSize);
+ }
+ return root;
+}
diff --git a/content/datastructures/treap2.cpp b/content/datastructures/treap2.cpp
new file mode 100644
index 0000000..c5a60e9
--- /dev/null
+++ b/content/datastructures/treap2.cpp
@@ -0,0 +1,79 @@
+mt19937 rng(0xc4bd5dad);
+struct Treap {
+ struct Node {
+ ll val;
+ int prio, size = 1, l = -1, r = -1;
+ Node(ll x) : val(x), prio(rng()) {}
+ };
+
+ vector<Node> treap;
+ int root = -1;
+
+ int getSize(int v) {
+ return v < 0 ? 0 : treap[v].size;
+ }
+
+ void upd(int v) {
+ if (v < 0) return;
+ auto& V = treap[v];
+ V.size = 1 + getSize(V.l) + getSize(V.r);
+ // Update Node Code
+ }
+
+ void push(int v) {
+ if (v < 0) return;
+ //auto& V = treap[v];
+ //if (V.lazy) {
+ // Lazy Propagation Code
+ // if (V.l >= 0) treap[V.l].lazy = true;
+ // if (V.r >= 0) treap[V.r].lazy = true;
+ // V.lazy = false;
+ //}
+ }
+
+ pair<int, int> split(int v, int k) {
+ if (v < 0) return {-1, -1};
+ auto& V = treap[v];
+ push(v);
+ if (getSize(V.l) >= k) { // "V.val >= k" for lower_bound(k)
+ auto [left, right] = split(V.l, k);
+ V.l = right;
+ upd(v);
+ return {left, v};
+ } else {
+ // and only "k"
+ auto [left, right] = split(V.r, k - getSize(V.l) - 1);
+ V.r = left;
+ upd(v);
+ return {v, right};
+ }}
+
+ int merge(int left, int right) {
+ if (left < 0) return right;
+ if (right < 0) return left;
+ if (treap[left].prio < treap[right].prio) {
+ push(left);
+ treap[left].r = merge(treap[left].r, right);
+ upd(left);
+ return left;
+ } else {
+ push(right);
+ treap[right].l = merge(left, treap[right].l);
+ upd(right);
+ return right;
+ }}
+
+ void insert(int i, ll val) { // and i = val
+ auto [left, right] = split(root, i);
+ treap.emplace_back(val);
+ left = merge(left, sz(treap) - 1);
+ root = merge(left, right);
+ }
+
+ void remove(int i, int count = 1) {
+ auto [left, t_right] = split(root, i);
+ auto [middle, right] = split(t_right, count);
+ root = merge(left, right);
+ }
+ // for query use remove and read middle BEFORE remerging
+};
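Usage sketch for the implicit treap (0-based positions, insert shifts everything at index >= i):
Treap t;
for (int i = 0; i < 5; i++) t.insert(i, 10 * i); // [0,10,20,30,40]
t.insert(2, 99);                                 // [0,10,99,20,30,40]
t.remove(1, 2);                                  // [0,20,30,40]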
diff --git a/content/datastructures/unionFind.cpp b/content/datastructures/unionFind.cpp
new file mode 100644
index 0000000..dd5a569
--- /dev/null
+++ b/content/datastructures/unionFind.cpp
@@ -0,0 +1,26 @@
+// unions[i] >= 0 => unions[i] = parent
+// unions[i] < 0 => unions[i] = -size
+vector<int> unions;
+
+void init(int n) { //initialize
+ unions.assign(n, -1);
+}
+
+int findSet(int a) { // path compression
+ if (unions[a] < 0) return a;
+ return unions[a] = findSet(unions[a]);
+}
+
+void linkSets(int a, int b) { // Union by size.
+ if (unions[b] > unions[a]) swap(a, b);
+ unions[b] += unions[a];
+ unions[a] = b;
+}
+
+void unionSets(int a, int b) { // Call this function.
+ if (findSet(a) != findSet(b)) linkSets(findSet(a), findSet(b));
+}
+
+int size(int a) {
+ return -unions[findSet(a)];
+}
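Usage sketch (always go through unionSets/findSet; linkSets is internal):
init(5);
unionSets(0, 1);
unionSets(1, 2);
bool same = findSet(0) == findSet(2);  // true
int s = size(2);                       // 3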
diff --git a/content/datastructures/waveletTree.cpp b/content/datastructures/waveletTree.cpp
new file mode 100644
index 0000000..090cdb2
--- /dev/null
+++ b/content/datastructures/waveletTree.cpp
@@ -0,0 +1,40 @@
+struct WaveletTree {
+ using it = vector<ll>::iterator;
+ WaveletTree *ln = nullptr, *rn = nullptr;
+ vector<int> b = {0};
+ ll lo, hi;
+
+ WaveletTree(vector<ll> in) : WaveletTree(all(in)) {}
+
+ WaveletTree(it from, it to) : // call above one
+ lo(*min_element(from, to)), hi(*max_element(from, to) + 1) {
+ ll mid = (lo + hi) / 2;
+ auto f = [&](ll x) {return x < mid;};
+ for (it c = from; c != to; c++) {
+ b.push_back(b.back() + f(*c));
+ }
+ if (lo + 1 >= hi) return;
+ it pivot = stable_partition(from, to, f);
+ ln = new WaveletTree(from, pivot);
+ rn = new WaveletTree(pivot, to);
+ }
+
+ // k-th smallest element in sorted [l, r), all 0-indexed
+ ll kth(int l, int r, int k) {
+ if (k < 0 || l + k >= r) return -1;
+ if (lo + 1 >= hi) return lo;
+ int inLeft = b[r] - b[l];
+ if (k < inLeft) return ln->kth(b[l], b[r], k);
+ else return rn->kth(l-b[l], r-b[r], k-inLeft);
+ }
+
+ // count elements in[l, r) smaller than k
+ int countSmaller(int l, int r, ll k) {
+ if (l >= r || k <= lo) return 0;
+ if (hi <= k) return r - l;
+ return ln->countSmaller(b[l], b[r], k) +
+ rn->countSmaller(l-b[l], r-b[r], k);
+ }
+
+ ~WaveletTree() {delete ln; delete rn;}
+};
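Usage sketch (0-based indices, half-open ranges):
vector<ll> a = {3, 1, 4, 1, 5};
WaveletTree wt(a);
ll x = wt.kth(0, 5, 1);            // 2nd smallest of a[0..4] -> 1
int c = wt.countSmaller(1, 4, 4);  // elements of {1, 4, 1} below 4 -> 2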
diff --git a/content/geometry/antipodalPoints.cpp b/content/geometry/antipodalPoints.cpp
new file mode 100644
index 0000000..110cc74
--- /dev/null
+++ b/content/geometry/antipodalPoints.cpp
@@ -0,0 +1,12 @@
+vector<pair<int, int>> antipodalPoints(vector<pt>& h) {
+ if (sz(h) < 2) return {};
+ vector<pair<int, int>> result;
+ for (int i = 0, j = 1; i < j; i++) {
+ while (true) {
+ result.push_back({i, j});
+ if (cross(h[(i + 1) % sz(h)] - h[i],
+ h[(j + 1) % sz(h)] - h[j]) <= 0) break;
+ j = (j + 1) % sz(h);
+ }}
+ return result;
+}
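A small sketch computing the squared diameter of a convex polygon from its antipodal pairs, assuming pt = complex<ll> and h in counter-clockwise order without a repeated endpoint (drop the duplicated last point that convexHull.cpp returns).
ll diam2 = 0;
for (auto [i, j] : antipodalPoints(h))
  diam2 = max(diam2, norm(h[i] - h[j]));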
diff --git a/content/geometry/circle.cpp b/content/geometry/circle.cpp
new file mode 100644
index 0000000..6789c52
--- /dev/null
+++ b/content/geometry/circle.cpp
@@ -0,0 +1,33 @@
+// computes the intersection points of two circles
+// (the circles must not be identical!)
+vector<pt> circleIntersection(pt c1, double r1,
+ pt c2, double r2) {
+ double d = abs(c1 - c2);
+ if (d < abs(r1 - r2) || d > abs(r1 + r2)) return {};
+ double a = (r1 * r1 - r2 * r2 + d * d) / (2 * d);
+ pt p = (c2 - c1) * a / d + c1;
+ if (d == abs(r1 - r2) || d == abs(r1 + r2)) return {p};
+ double h = sqrt(r1 * r1 - a * a);
+ return {p + pt{0, 1} * (c2 - c1) * h / d,
+ p - pt{0, 1} * (c2 - c1) * h / d};
+}
+
+// computes the intersection points of a circle (sphere)
+// and a ray (2D and 3D)
+vector<pt> circleRayIntersection(pt center, double r,
+ pt orig, pt dir) {
+ vector<pt> result;
+ double a = norm(dir);
+ double b = 2 * dot(dir, orig - center);
+ double c = norm(orig - center) - r * r;
+ double discr = b * b - 4 * a * c;
+ if (discr >= 0) {
+ //t in [0, 1] => intersection with the segment [orig, orig + dir]
+ double t1 = -(b + sqrt(discr)) / (2 * a);
+ double t2 = -(b - sqrt(discr)) / (2 * a);
+ if (t1 >= 0) result.push_back(t1 * dir + orig);
+ if (t2 >= 0 && abs(t1 - t2) > EPS) {
+ result.push_back(t2 * dir + orig);
+ }}
+ return result;
+}
diff --git a/content/geometry/closestPair.cpp b/content/geometry/closestPair.cpp
new file mode 100644
index 0000000..9b115f3
--- /dev/null
+++ b/content/geometry/closestPair.cpp
@@ -0,0 +1,27 @@
+ll rec(vector<pt>::iterator a, int l, int r) {
+ if (r - l < 2) return INF;
+ int m = (l + r) / 2;
+ ll midx = a[m].real();
+ ll ans = min(rec(a, l, m), rec(a, m, r));
+
+ inplace_merge(a+l, a+m, a+r, [](const pt& x, const pt& y) {
+ return x.imag() < y.imag();
+ });
+
+ pt tmp[8];
+ fill(all(tmp), a[l]);
+ for (int i = l + 1, next = 0; i < r; i++) {
+ if (ll x = a[i].real() - midx; x * x < ans) {
+ for (pt& p : tmp) ans = min(ans, norm(p - a[i]));
+ tmp[next++ & 7] = a[i];
+ }
+ }
+ return ans;
+}
+
+ll shortestDist(vector<pt> a) { // sz(a) > 1, returns squared distance
+ sort(all(a), [](const pt& x, const pt& y) {
+ return x.real() < y.real();
+ });
+ return rec(a.begin(), 0, sz(a));
+}
diff --git a/content/geometry/convexHull.cpp b/content/geometry/convexHull.cpp
new file mode 100644
index 0000000..6d89e05
--- /dev/null
+++ b/content/geometry/convexHull.cpp
@@ -0,0 +1,18 @@
+vector<pt> convexHull(vector<pt> pts){
+ sort(all(pts), [](const pt& a, const pt& b){
+ return real(a) == real(b) ? imag(a) < imag(b)
+ : real(a) < real(b);
+ });
+ pts.erase(unique(all(pts)), pts.end());
+ int k = 0;
+ vector<pt> h(2 * sz(pts));
+ auto half = [&](auto begin, auto end, int t) {
+ for (auto it = begin; it != end; it++) {
+ while (k > t && cross(h[k-2], h[k-1], *it) <= 0) k--;
+ h[k++] = *it;
+ }};
+ half(all(pts), 1);// lower hull
+ half(next(pts.rbegin()), pts.rend(), k);// upper hull
+ h.resize(k);
+ return h;
+}
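A small sketch; as noted in geometry.tex the hull is returned counter-clockwise with the first and last point identical, so strip the duplicate when an open polygon is needed (e.g. for antipodalPoints above).
vector<pt> pts = {{0, 0}, {2, 0}, {2, 2}, {0, 2}, {1, 1}};
vector<pt> h = convexHull(pts);  // the four corners, closed
h.pop_back();                    // open polygon: 4 vertices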
diff --git a/content/geometry/delaunay.cpp b/content/geometry/delaunay.cpp
new file mode 100644
index 0000000..c813892
--- /dev/null
+++ b/content/geometry/delaunay.cpp
@@ -0,0 +1,124 @@
+using lll = __int128;
+using pt = complex<lll>;
+
+constexpr pt INF_PT = pt(2e18, 2e18);
+
+bool circ(pt p, pt a, pt b, pt c) {// p in circle(A,B,C), ABC must be ccw
+ return imag((c-b)*conj(p-c)*(a-p)*conj(b-a)) < 0;
+}
+
+struct QuadEdge {
+ QuadEdge* rot = nullptr;
+ QuadEdge* onext = nullptr;
+ pt orig = INF_PT;
+ bool used = false;
+ QuadEdge* rev() const {return rot->rot;}
+ QuadEdge* lnext() const {return rot->rev()->onext->rot;}
+ QuadEdge* oprev() const {return rot->onext->rot;}
+ pt dest() const {return rev()->orig;}
+};
+
+deque<QuadEdge> edgeData;
+
+QuadEdge* makeEdge(pt from, pt to) {
+ for (int _ : {0,1,2,3}) edgeData.push_back({});
+ auto e = edgeData.end() - 4;
+ for (int j : {0,1,2,3}) e[j].onext = e[j^3].rot = &e[j^(j>>1)];
+ e[0].orig = from;
+ e[1].orig = to;
+ return &e[0];
+}
+
+void splice(QuadEdge* a, QuadEdge* b) {
+ swap(a->onext->rot->onext, b->onext->rot->onext);
+ swap(a->onext, b->onext);
+}
+
+QuadEdge* connect(QuadEdge* a, QuadEdge* b) {
+ QuadEdge* e = makeEdge(a->dest(), b->orig);
+ splice(e, a->lnext());
+ splice(e->rev(), b);
+ return e;
+}
+
+bool valid(QuadEdge* e, QuadEdge* base) {
+ return cross(e->dest(), base->orig, base->dest()) < 0;
+}
+
+template<bool ccw>
+QuadEdge* deleteAll(QuadEdge* e, QuadEdge* base) {
+ if (valid(e, base)) {
+ while (circ(base->dest(), base->orig, e->dest(), (ccw ? e->onext : e->oprev())->dest())) {
+ QuadEdge* t = ccw ? e->onext : e->oprev();
+ splice(e, e->oprev());
+ splice(e->rev(), e->rev()->oprev());
+ e = t;
+ }}
+ return e;
+}
+
+template<typename IT>
+pair<QuadEdge*, QuadEdge*> rec(IT l, IT r) {
+ int n = distance(l, r);
+ if (n <= 3) {
+ QuadEdge* a = makeEdge(l[0], l[1]);
+ if (n == 2) return {a, a->rev()};
+ QuadEdge* b = makeEdge(l[1], l[2]);
+ splice(a->rev(), b);
+ auto side = cross(l[0], l[1], l[2]);
+ QuadEdge* c = nullptr;
+ if (side != 0) c = connect(b, a);
+ if (side >= 0) return {a, b->rev()};
+ else return {c->rev(), c};
+ }
+ auto m = l + (n / 2);
+ auto [ldo, ldi] = rec(l, m);
+ auto [rdi, rdo] = rec(m, r);
+ while (true) {
+ if (cross(rdi->orig, ldi->orig, ldi->dest()) > 0) {
+ ldi = ldi->lnext();
+ } else if (cross(ldi->orig, rdi->orig, rdi->dest()) < 0) {
+ rdi = rdi->rev()->onext;
+ } else break;
+ }
+ QuadEdge* base = connect(rdi->rev(), ldi);
+ if (ldi->orig == ldo->orig) ldo = base->rev();
+ if (rdi->orig == rdo->orig) rdo = base;
+ while (true) {
+ QuadEdge* lcand = deleteAll<true>(base->rev()->onext, base);
+ QuadEdge* rcand = deleteAll<false>(base->oprev(), base);
+ if (!valid(lcand, base) && !valid(rcand, base)) break;
+ if (!valid(lcand, base) || (valid(rcand, base) &&
+ circ(lcand->dest(), lcand->orig, rcand->orig, rcand->dest()))) {
+ base = connect(rcand, base->rev());
+ } else {
+ base = connect(base->rev(), lcand->rev());
+ }}
+ return {ldo, rdo};
+}
+
+vector<pt> delaunay(vector<pt> pts) {
+ if (sz(pts) <= 2) return {};
+ sort(all(pts), [](const pt& a, const pt& b) {
+ if (real(a) != real(b)) return real(a) < real(b);
+ return imag(a) < imag(b);
+ });
+ QuadEdge* r = rec(all(pts)).first;
+ vector<QuadEdge*> edges = {r};
+ while (cross(r->onext->dest(), r->dest(), r->orig) < 0) r = r->onext;
+ auto add = [&](QuadEdge* e){
+ QuadEdge* cur = e;
+ do {
+ cur->used = true;
+ pts.push_back(cur->orig);
+ edges.push_back(cur->rev());
+ cur = cur->lnext();
+ } while (cur != e);
+ };
+ add(r);
+ pts.clear();
+ for (int i = 0; i < sz(edges); i++) {
+ if (!edges[i]->used) add(edges[i]);
+ }
+ return pts;
+}
diff --git a/content/geometry/formulas.cpp b/content/geometry/formulas.cpp
new file mode 100644
index 0000000..5d4e10d
--- /dev/null
+++ b/content/geometry/formulas.cpp
@@ -0,0 +1,42 @@
+// Complex numbers as points. Use complex<ll> whenever possible.
+// Functions like abs() then return ll, though.
+using pt = complex<double>;
+
+constexpr double PIU = acos(-1.0l); // PIL < PI < PIU
+constexpr double PIL = PIU-2e-19l;
+
+// Angle between the point and the x-axis, in [-PI, PI].
+double angle(pt a) {return arg(a);}
+
+// rotates a point counter-clockwise around the origin.
+pt rotate(pt a, double theta) {return a * polar(1.0, theta);}
+
+// Dot product.
+auto dot(pt a, pt b) {return real(conj(a) * b);}
+
+// abs()^2.(pre c++20)
+auto norm(pt a) {return dot(a, a);}
+
+// Cross product, 0 if collinear.
+auto cross(pt a, pt b) {return imag(conj(a) * b);}
+auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);}
+
+// 1 => c left of a->b
+// 0 => a, b and c collinear
+// -1 => c right of a->b
+int ccw(pt a, pt b, pt c) {
+ auto orien = cross(b - a, c - a);
+ return (orien > EPS) - (orien < -EPS);
+}
+
+// Is d in the same plane as a, b and c? (3D, uses pt3;
+// scalar triple product)
+bool isCoplanar(pt3 a, pt3 b, pt3 c, pt3 d) {
+ return abs(dot(cross(b - a, c - a), d - a)) < EPS;
+}
+
+// characterizes the angle between vectors u and v
+pt uniqueAngle(pt u, pt v) {
+ pt tmp = v * conj(u);
+ ll g = abs(gcd(real(tmp), imag(tmp)));
+ return tmp / g;
+}
diff --git a/content/geometry/formulas3d.cpp b/content/geometry/formulas3d.cpp
new file mode 100644
index 0000000..dee3ce8
--- /dev/null
+++ b/content/geometry/formulas3d.cpp
@@ -0,0 +1,53 @@
+// Dot product
+auto operator|(pt3 a, pt3 b) {
+ return a.x * b.x + a.y*b.y + a.z*b.z;
+}
+auto dot(pt3 a, pt3 b) {return a|b;}
+
+// Cross product
+pt3 operator*(pt3 a, pt3 b) {return {a.y*b.z - a.z*b.y,
+ a.z*b.x - a.x*b.z,
+ a.x*b.y - a.y*b.x};}
+pt3 cross(pt3 a, pt3 b) {return a*b;}
+
+// Length of a
+double abs(pt3 a) {return sqrt(dot(a, a));}
+double abs(pt3 a, pt3 b) {return abs(b - a);}
+
+// Scalar triple product
+auto mixed(pt3 a, pt3 b, pt3 c) {return a*b|c;};
+
+// orientation of p relative to the plane through a, b, c
+// -1 => counter-clockwise,
+// 0 => coplanar,
+// 1 => clockwise.
+int ccw(pt3 a, pt3 b, pt3 c, pt3 p) {
+ auto orien = mixed(b - a, c - a, p - a);
+ return (orien > EPS) - (orien < -EPS);
+}
+
+// Distance from point p to the plane through a, b, c.
+double distToPlane(pt3 a, pt3 b, pt3 c, pt3 p) {
+ pt3 n = cross(b-a, c-a);
+ return abs(dot(n, p - a)) / abs(n);
+}
+
+// Is p in the plane through a, b, c?
+bool pointOnPlane(pt3 a, pt3 b, pt3 c, pt3 p) {
+ return ccw(a, b, c, p) == 0;
+}
+
+// Intersection point of the line a-b with the plane through c, d, e.
+// The line must not be parallel to the plane!
+pt3 linePlaneIntersection(pt3 a, pt3 b, pt3 c, pt3 d, pt3 e) {
+ pt3 n = cross(d-c, e-c);
+ pt3 dir = b - a;
+ return a - dir * (dot(n, a) - dot(n, c)) / dot(n, dir);
+}
+
+// Distance between the lines a-b and c-d
+double lineLineDist(pt3 a, pt3 b, pt3 c, pt3 d) {
+ pt3 n = cross(b - a, d - c);
+ if (abs(n) < EPS) return distToLine(a, b, c);
+ return abs(dot(a - c, n)) / abs(n);
+}
diff --git a/content/geometry/geometry.tex b/content/geometry/geometry.tex
new file mode 100644
index 0000000..92285c4
--- /dev/null
+++ b/content/geometry/geometry.tex
@@ -0,0 +1,62 @@
+\section{Geometry}
+
+\begin{algorithm}{Closest Pair}
+ \begin{methods}
+ \method{shortestDist}{shortest (squared) distance between two points}{n\*\log(n)}
+ \end{methods}
+ \sourcecode{geometry/closestPair.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Convex Hull}
+ \begin{methods}
+ \method{convexHull}{computes the convex hull}{n\*\log(n)}
+ \end{methods}
+ \begin{itemize}
+ \item hull is sorted counter-clockwise
+ \item contains only corner points (to keep collinear points, remove the $=$ from the ccw test)
+ \item first and last point are identical
+ \end{itemize}
+ \sourcecode{geometry/convexHull.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Rotating calipers}
+ \begin{methods}
+ \method{antipodalPoints}{computes all antipodal pairs}{n}
+ \end{methods}
+ \textbf{IMPORTANT:} the points must be sorted counter-clockwise and form a convex polygon!
+ \sourcecode{geometry/antipodalPoints.cpp}
+\end{algorithm}
+
+\subsection{Formulas~~--~\texttt{std::complex}}
+\sourcecode{geometry/formulas.cpp}
+\sourcecode{geometry/linesAndSegments.cpp}
+\sourcecode{geometry/sortAround.cpp}
+\input{geometry/triangle}
+\sourcecode{geometry/triangle.cpp}
+\sourcecode{geometry/polygon.cpp}
+\sourcecode{geometry/circle.cpp}
+
+\subsection{Formulas -- 3D}
+\sourcecode{geometry/formulas3d.cpp}
+
+\optional{
+ \subsection{3D Spheres}
+ \sourcecode{geometry/spheres.cpp}
+}
+
+\begin{algorithm}{Half-plane intersection}
+ \sourcecode{geometry/hpi.cpp}
+\end{algorithm}
+
+\begin{algorithm}[optional]{Delaunay Triangulation}
+ \begin{methods}
+ \method{delaunay}{computes the triangulation}{n\*\log(n)}
+ \end{methods}
+ \textbf{IMPORTANT:} if all points are collinear there is no triangulation! If 4 points lie on a common circle the triangulation is not unique.
+ \sourcecode{geometry/delaunay.cpp}
+\end{algorithm}
+
+\optional{
+\subsection{Lines}
+\sourcecode{geometry/lines.cpp}
+}
diff --git a/content/geometry/hpi.cpp b/content/geometry/hpi.cpp
new file mode 100644
index 0000000..3509e0e
--- /dev/null
+++ b/content/geometry/hpi.cpp
@@ -0,0 +1,68 @@
+constexpr ll inf = 0x1FFF'FFFF'FFFF'FFFF;//THIS CODE IS WIP
+
+bool left(pt p) {return real(p) < 0 ||
+ (real(p) == 0 && imag(p) < 0);}
+struct hp {
+ pt from, to;
+
+ hp(pt a, pt b) : from(a), to(b) {}
+ hp(pt dummy) : hp(dummy, dummy) {}
+
+ bool dummy() const {return from == to;}
+ pt dir() const {return dummy() ? to : to - from;}
+ bool operator<(const hp& o) const {
+ if (left(dir()) != left(o.dir()))
+ return left(dir()) > left(o.dir());
+ return cross(dir(), o.dir()) > 0;
+ }
+
+ using lll = __int128;
+ using ptl = complex<lll>;
+ ptl mul(lll m, ptl p) const {return m*p;}//ensure 128bit
+
+ bool check(const hp& a, const hp& b) const {
+ if (dummy() || b.dummy()) return false;
+ if (a.dummy()) {
+ ll ort = sgn(cross(b.dir(), dir()));
+ if (ort == 0) return cross(from, to, a.from) < 0;
+ return cross(b.dir(), a.dir()) * ort > 0;
+ }
+ ll y = cross(a.dir(), b.dir());
+ ll z = cross(b.from - a.from, b.dir());
+ ptl i = mul(y, a.from) + mul(z, a.dir()); //intersect a and b
+ // check if i is outside/right of x
+ return imag(conj(mul(sgn(y),dir()))*(i-mul(y,from))) < 0;
+ }
+};
+
+constexpr ll lim = 2e9+7;
+
+deque<hp> intersect(vector<hp> hps) {
+ hps.push_back(hp(pt{lim+1,-1}));
+ hps.push_back(hp(pt{lim+1,1}));
+ sort(all(hps));
+
+ deque<hp> dq = {hp(pt{-lim, 1})};
+ for (auto x : hps) {
+ while (sz(dq) > 1 && x.check(dq.end()[-1], dq.end()[-2]))
+ dq.pop_back();
+ while (sz(dq) > 1 && x.check(dq[0], dq[1]))
+ dq.pop_front();
+
+ if (cross(x.dir(), dq.back().dir()) == 0) {
+ if (dot(x.dir(), dq.back().dir()) < 0) return {};
+ if (cross(x.from, x.to, dq.back().from) < 0)
+ dq.pop_back();
+ else continue;
+ }
+ dq.push_back(x);
+ }
+
+ while (sz(dq) > 2 && dq[0].check(dq.end()[-1], dq.end()[-2]))
+ dq.pop_back();
+ while (sz(dq) > 2 && dq.end()[-1].check(dq[0], dq[1]))
+ dq.pop_front();
+
+ if (sz(dq) < 3) return {};
+ return dq;
+}
diff --git a/content/geometry/lines.cpp b/content/geometry/lines.cpp
new file mode 100644
index 0000000..95536a4
--- /dev/null
+++ b/content/geometry/lines.cpp
@@ -0,0 +1,33 @@
+struct line {
+ double a, b, c; // ax + by + c = 0; vertical line: b = 0, otherwise: b = 1
+ line() {}
+ line(pt p, pt q) : a(-imag(q-p)), b(real(q-p)), c(cross(p, {b, -a})) {}
+};
+
+line pointsToLine(pt p1, pt p2) {
+ line l;
+ if (abs(real(p1 - p2)) < EPS) {
+ l.a = 1; l.b = 0.0; l.c = -real(p1);
+ } else {
+ l.a = -imag(p1 - p2) / real(p1 - p2);
+ l.b = 1.0;
+ l.c = -(l.a * real(p1)) - imag(p1);
+ }
+ return l;
+}
+
+bool parallel(line l1, line l2) {
+ return (abs(l1.a - l2.a) < EPS) && (abs(l1.b - l2.b) < EPS);
+}
+
+bool same(line l1, line l2) {
+ return parallel(l1, l2) && (abs(l1.c - l2.c) < EPS);
+}
+
+bool intersect(line l1, line l2, pt& p) {
+ if (parallel(l1, l2)) return false;
+ double y, x = (l2.b * l1.c - l1.b * l2.c) / (l2.a * l1.b - l1.a * l2.b);
+ if (abs(l1.b) > EPS) y = -(l1.a * x + l1.c);
+ else y = -(l2.a * x + l2.c);
+ p = {x, y};
+ return true;
+}
diff --git a/content/geometry/linesAndSegments.cpp b/content/geometry/linesAndSegments.cpp
new file mode 100644
index 0000000..1e21cba
--- /dev/null
+++ b/content/geometry/linesAndSegments.cpp
@@ -0,0 +1,89 @@
+// Is p on the line through a and b? 2D and 3D
+bool pointOnLine(pt a, pt b, pt p) {
+ return ccw(a, b, p) == 0;
+}
+
+// Do the lines a-b and c-d intersect? (lines must not be identical)
+bool lineIntersection(pt a, pt b, pt c, pt d) {
+ return abs(cross(a - b, c - d)) > EPS;
+}
+
+// Computes the intersection point of the lines a-b and c-d.
+// The lines must not be parallel!
+pt lineIntersection2(pt a, pt b, pt c, pt d) {
+ double x = cross(b - a, d - c);
+ double y = cross(c - a, d - c);
+ return a + y/x*(b - a);
+}
+
+// Distance from point p to the line through a and b. 2D and 3D
+double distToLine(pt a, pt b, pt p) {
+ return abs(cross(p - a, b - a)) / abs(b - a);
+}
+
+// Projects p onto the line through a and b
+pt projectToLine(pt a, pt b, pt p) {
+ return a + (b - a) * dot(p - a, b - a) / norm(b - a);
+}
+
+// sorts all points in pts along a line according to dir
+void sortLine(pt dir, vector<pt>& pts) { // (2D and 3D)
+ sort(all(pts), [&](pt a, pt b){
+ return dot(dir, a) < dot(dir, b);
+ });
+}
+
+// Is p on the segment a-b? (use < for strictly inside)
+bool pointOnSegment(pt a, pt b, pt p) {
+ if (ccw(a, b, p) != 0) return false;
+ auto dist = norm(a - b);
+ return norm(a - p) <= dist && norm(b - p) <= dist;
+}
+
+// Distance from point p to the segment a-b.
+double distToSegment(pt a, pt b, pt p) {
+ if (a == b) return abs(p - a);
+ if (dot(p - a, b - a) <= 0) return abs(p - a);
+ if (dot(p - b, b - a) >= 0) return abs(p - b);
+ return distToLine(a, b, p);
+}
+
+// Do the segments a-b and c-d intersect?
+bool segmentIntersection(pt a, pt b, pt c, pt d) {
+ if (ccw(a, b, c) == 0 && ccw(a, b, d) == 0)
+ return pointOnSegment(a,b,c) ||
+ pointOnSegment(a,b,d) ||
+ pointOnSegment(c,d,a) ||
+ pointOnSegment(c,d,b);
+ return ccw(a, b, c) * ccw(a, b, d) <= 0 &&
+ ccw(c, d, a) * ccw(c, d, b) <= 0;
+}
+
+// Computes the intersection of the segments a-b and c-d.
+// Contains either no point, the unique intersection point,
+// or the endpoints of the overlapping segment.
+vector<pt> segmentIntersection2(pt a, pt b, pt c, pt d) {
+ double x = cross(b - a, d - c);
+ double y = cross(c - a, d - c);
+ double z = cross(b - a, a - c);
+ if (x < 0) {x = -x; y = -y; z = -z;}
+ if (y < -EPS || y-x > EPS || z < -EPS || z-x > EPS) return {};
+ if (x > EPS) return {a + y/x*(b - a)};
+ vector<pt> result;
+ auto insertUnique = [&](pt p) {
+ for (auto q : result) if (abs(p - q) < EPS) return;
+ result.push_back(p);
+ };
+ if (dot(c-a, d-a) < EPS) insertUnique(a);
+ if (dot(c-b, d-b) < EPS) insertUnique(b);
+ if (dot(a-c, b-c) < EPS) insertUnique(c);
+ if (dot(a-d, b-d) < EPS) insertUnique(d);
+ return result;
+}
+
+// Shortest distance between the segments a-b and c-d.
+double distBetweenSegments(pt a, pt b, pt c, pt d) {
+ if (segmentIntersection(a, b, c, d)) return 0.0;
+ return min({distToSegment(a, b, c), distToSegment(a, b, d),
+ distToSegment(c, d, a), distToSegment(c, d, b)});
+}
diff --git a/content/geometry/polygon.cpp b/content/geometry/polygon.cpp
new file mode 100644
index 0000000..3178290
--- /dev/null
+++ b/content/geometry/polygon.cpp
@@ -0,0 +1,150 @@
+// Area of a polygon (not self-intersecting).
+// Points counter-clockwise: positive, otherwise negative.
+double area(const vector<pt>& poly) { //poly[0] == poly.back()
+ ll res = 0;
+ for (int i = 0; i + 1 < sz(poly); i++)
+ res += cross(poly[i], poly[i + 1]);
+ return 0.5 * res;
+}
+
+// Number of ccw turns of a polyline around a point.
+// p must not be on the border and poly[0] == poly.back().
+// Use res != 0 or (res & 1) != 0 as the inside test for
+// self-intersecting polygons (a matter of definition).
+ll windingNumber(pt p, const vector<pt>& poly) {
+ ll res = 0;
+ for (int i = 0; i + 1 < sz(poly); i++) {
+ pt a = poly[i], b = poly[i + 1];
+ if (real(a) > real(b)) swap(a, b);
+ if (real(a) <= real(p) && real(p) < real(b) &&
+ cross(p, a, b) < 0) {
+ res += ccw(p, poly[i], poly[i + 1]);
+ }}
+ return res;
+}
+
+// Tests whether a point lies inside the polygon (any polygon).
+// Change line 32 if the boundary counts, poly[0] == poly.back()
+bool inside(pt p, const vector<pt>& poly) {
+ bool in = false;
+ for (int i = 0; i + 1 < sz(poly); i++) {
+ pt a = poly[i], b = poly[i + 1];
+ if (pointOnSegment(a, b, p)) return false;
+ if (real(a) > real(b)) swap(a,b);
+ if (real(a) <= real(p) && real(p) < real(b) &&
+ cross(p, a, b) < 0) {
+ in ^= 1;
+ }}
+ return in;
+}
+
+// convex hull without duplicates, h[0] != h.back()
+// use the commented comparisons if border counts as inside
+bool insideConvex(pt p, const vector<pt>& hull) {
+ int l = 0, r = sz(hull) - 1;
+ if (cross(hull[0], hull[r], p) >= 0) return false; // > 0
+ while (l + 1 < r) {
+ int m = (l + r) / 2;
+ if (cross(hull[0], hull[m], p) > 0) l = m; // >= 0
+ else r = m;
+ }
+ return cross(hull[l], hull[r], p) > 0; // >= 0
+}
+
+void rotateMin(vector<pt>& hull) {
+ auto mi = min_element(all(hull), [](const pt& a, const pt& b){
+ return real(a) == real(b) ? imag(a) < imag(b)
+ : real(a) < real(b);
+ });
+ rotate(hull.begin(), mi, hull.end());
+}
+
+// convex hulls without duplicates, h[0] != h.back()
+vector<pt> minkowski(vector<pt> ps, vector<pt> qs) {
+ rotateMin(ps);
+ rotateMin(qs);
+ ps.push_back(ps[0]);
+ qs.push_back(qs[0]);
+ ps.push_back(ps[1]);
+ qs.push_back(qs[1]);
+ vector<pt> res;
+ for (ll i = 0, j = 0; i + 2 < sz(ps) || j + 2 < sz(qs);) {
+ res.push_back(ps[i] + qs[j]);
+ auto c = cross(ps[i + 1] - ps[i], qs[j + 1] - qs[j]);
+ if(c >= 0) i++;
+ if(c <= 0) j++;
+ }
+ return res;
+}
+
+// convex hulls without duplicates, h[0] != h.back()
+double dist(const vector<pt>& ps, vector<pt> qs) {
+ for (pt& q : qs) q *= -1;
+ auto p = minkowski(ps, qs);
+ p.push_back(p[0]);
+ double res = INF;
+ bool intersect = true;
+ for (ll i = 0; i + 1 < sz(p); i++) {
+ intersect &= cross(p[i], p[i+1]) >= 0;
+ res = min(res, distToSegment(p[i], p[i+1], 0));
+ }
+ return intersect ? 0 : res;
+}
+
+bool left(pt of, pt p) {return cross(p, of) < 0 ||
+ (cross(p, of) == 0 && dot(p, of) > 0);}
+
+// convex hulls without duplicates, hull[0] == hull.back() and
+// hull[0] must be a convex point (with angle < pi)
+// returns index of corner where dot(dir, corner) is maximized
+int extremal(const vector<pt>& hull, pt dir) {
+ dir *= pt(0, 1);
+ int l = 0, r = sz(hull) - 1;
+ while (l + 1 < r) {
+ int m = (l + r) / 2;
+ pt dm = hull[m+1]-hull[m];
+ pt dl = hull[l+1]-hull[l];
+ if (left(dl, dir) != left(dl, dm)) {
+ if (left(dl, dm)) l = m;
+ else r = m;
+ } else {
+ if (cross(dir, dm) < 0) l = m;
+ else r = m;
+ }}
+ return r % (sz(hull) - 1);
+}
+
+// convex hulls without duplicates, hull[0] == hull.back() and
+// hull[0] must be a convex point (with angle < pi)
+// {} if no intersection
+// {x} if corner is only intersection
+// {i, j} segments (i,i+1) and (j,j+1) intersected (if only the
+// border is intersected corners i and j are the start and end)
+vector<int> intersectLine(const vector<pt>& hull, pt a, pt b) {
+ int endA = extremal(hull, (a-b) * pt(0, 1));
+ int endB = extremal(hull, (b-a) * pt(0, 1));
+ // cross == 0 => line only intersects border
+ if (cross(hull[endA], a, b) > 0 ||
+ cross(hull[endB], a, b) < 0) return {};
+
+ int n = sz(hull) - 1;
+ vector<int> res;
+ for (auto _ : {0, 1}) {
+ int l = endA, r = endB;
+ if (r < l) r += n;
+ while (l + 1 < r) {
+ int m = (l + r) / 2;
+ if (cross(hull[m % n], a, b) <= 0 &&
+ cross(hull[m % n], a, b) != cross(hull[endB], a, b))
+ l = m;
+ else r = m;
+ }
+ if (cross(hull[r % n], a, b) == 0) l++;
+ res.push_back(l % n);
+ swap(endA, endB);
+ swap(a, b);
+ }
+ if (res[0] == res[1]) res.pop_back();
+ return res;
+}
+
diff --git a/content/geometry/segmentIntersection.cpp b/content/geometry/segmentIntersection.cpp
new file mode 100644
index 0000000..4262ddc
--- /dev/null
+++ b/content/geometry/segmentIntersection.cpp
@@ -0,0 +1,63 @@
+struct seg {
+ pt a, b;
+ int id;
+ bool operator<(const seg& o) const {
+ if (real(a) < real(o.a)) {
+ int s = ccw(a, b, o.a);
+ return (s > 0 || (s == 0 && imag(a) < imag(o.a)));
+ } else if (real(a) > real(o.a)) {
+ int s = ccw(o.a, o.b, a);
+ return (s < 0 || (s == 0 && imag(a) < imag(o.a)));
+ }
+ return imag(a) < imag(o.a);
+ }
+};
+
+struct event {
+ pt p;
+ int id, type;
+ bool operator<(const event& o) const {
+ if (real(p) != real(o.p)) return real(p) < real(o.p);
+ if (type != o.type) return type > o.type;
+ return imag(p) < imag(o.p);
+ }
+};
+
+bool lessPT(const pt& a, const pt& b) {
+ return real(a) != real(b) ? real(a) < real(b)
+ : imag(a) < imag(b);
+}
+
+bool intersect(const seg& a, const seg& b) {
+ return segmentIntersection(a.a, a.b, b.a, b.b);
+}
+
+pair<int, int> intersect(vector<seg>& segs) {
+ vector<event> events;
+ for (seg& s : segs) {
+ if (lessPT(s.b, s.a)) swap(s.b, s.a);
+ events.push_back({s.a, s.id, 1});
+ events.push_back({s.b, s.id, -1});
+ }
+ sort(all(events));
+
+ set<seg> q;
+ vector<set<seg>::iterator> where(sz(segs));
+ for (auto e : events) {
+ int id = e.id;
+ if (e.type > 0) {
+ auto it = q.lower_bound(segs[id]);
+ if (it != q.end() && intersect(*it, segs[id]))
+ return {it->id, segs[id].id};
+ if (it != q.begin() && intersect(*prev(it), segs[id]))
+ return {prev(it)->id, segs[id].id};
+ where[id] = q.insert(it, segs[id]);
+ } else {
+ auto it = where[id];
+ if (it != q.begin() && next(it) != q.end() && intersect(*next(it), *prev(it)))
+ return {next(it)->id, prev(it)->id};
+ q.erase(it);
+ }
+ }
+ return {-1, -1};
+}
diff --git a/content/geometry/sortAround.cpp b/content/geometry/sortAround.cpp
new file mode 100644
index 0000000..98d17a8
--- /dev/null
+++ b/content/geometry/sortAround.cpp
@@ -0,0 +1,11 @@
+bool left(pt p) {return real(p) < 0 ||
+ (real(p) == 0 && imag(p) < 0);}
+
+// counter clockwise, starting with "11:59"
+void sortAround(pt p, vector<pt>& ps) {
+ sort(all(ps), [&](const pt& a, const pt& b){
+ if (left(a - p) != left(b - p))
+ return left(a - p) > left(b - p);
+ return cross(p, a, b) > 0;
+ });
+}
diff --git a/content/geometry/spheres.cpp b/content/geometry/spheres.cpp
new file mode 100644
index 0000000..ec22262
--- /dev/null
+++ b/content/geometry/spheres.cpp
@@ -0,0 +1,29 @@
+// Great circle distance from latitude and longitude (degrees).
+double gcDist(double pLat, double pLon,
+ double qLat, double qLon, double radius) {
+ pLat *= PI / 180; pLon *= PI / 180;
+ qLat *= PI / 180; qLon *= PI / 180;
+ return radius * acos(cos(pLat) * cos(pLon) *
+ cos(qLat) * cos(qLon) +
+ cos(pLat) * sin(pLon) *
+ cos(qLat) * sin(qLon) +
+ sin(pLat) * sin(qLat));
+}
+
+// 3D point in Cartesian coordinates.
+struct point{
+ double x, y, z;
+ point() {}
+ point(double x, double y, double z) : x(x), y(y), z(z) {}
+ point(double lat, double lon) {
+ lat *= PI / 180.0; lon *= PI / 180.0;
+ x = cos(lat) * sin(lon);
+ y = cos(lat) * cos(lon);
+ z = sin(lat);
+ }
+};
+
+// Great circle distance from Cartesian coordinates
+// (unit vectors, e.g. from the lat/lon constructor).
+double gcDist(point p, point q) {
+ return acos(p.x * q.x + p.y * q.y + p.z * q.z);
+}
diff --git a/content/geometry/triangle.cpp b/content/geometry/triangle.cpp
new file mode 100644
index 0000000..534bb10
--- /dev/null
+++ b/content/geometry/triangle.cpp
@@ -0,0 +1,43 @@
+// Centroid of the triangle abc.
+pt centroid(pt a, pt b, pt c) {return (a + b + c) / 3.0;}
+
+// Area of a triangle given its vertices.
+double area(pt a, pt b, pt c) {
+ return abs(cross(a, b, c)) / 2.0;
+}
+
+// Area of a triangle given its side lengths (Heron).
+double area(double a, double b, double c) {
+ double s = (a + b + c) / 2.0; //imprecise
+ return sqrt(s * (s-a) * (s-b) * (s-c));
+}
+
+// Center of the largest circle inside the triangle (incenter).
+pt inCenter(pt a, pt b, pt c) {
+ double x = abs(a-b), y = abs(b-c), z = abs(a-c);
+ return (y*a + z*b + x*c) / (x+y+z);
+}
+
+// Center of the circle through all vertices (circumcenter).
+// a, b and c must not be collinear!
+pt circumCenter(pt a, pt b, pt c) {
+ b -= a, c -= a;
+ pt d = b * norm(c) - c * norm(b);
+ d = {-d.imag(), d.real()};
+ return a + d / cross(b, c) / 2.0;
+}
+
+// -1 => p outside the circle through a,b,c
+// 0 => p on the circle through a,b,c
+// 1 => p inside the circle through a,b,c
+int insideOutCenter(pt a, pt b, pt c, pt p) {// needs lll
+ return ccw(a,b,c) * sgn(imag((c-b)*conj(p-c)*(a-p)*conj(b-a)));
+}
+
+// Are the triangles a1, b1, c1 and a2, b2, c2 similar?
+// The first line tests similarity with equal orientation,
+// the second line tests similarity with opposite orientation.
+bool similar(pt a1, pt b1, pt c1, pt a2, pt b2, pt c2) {
+ return (b2-a2) * (c1-a1) == (b1-a1) * (c2-a2) ||
+ (b2-a2) * conj(c1-a1) == conj(b1-a1) * (c2-a2);
+}
diff --git a/content/geometry/triangle.tex b/content/geometry/triangle.tex
new file mode 100644
index 0000000..3decd54
--- /dev/null
+++ b/content/geometry/triangle.tex
@@ -0,0 +1,41 @@
+
+\begin{minipage}[T]{0.27\linewidth}
+ In general:
+ \begin{itemize}
+ \item $\cos(\gamma)=\frac{a^2+b^2-c^2}{2ab}$
+ \item $b=\frac{a}{\sin(\alpha)}\sin(\beta)$
+ %\item $b=\frac{a}{\sin(\pi-\beta-\gamma)}\sin(\beta)$
+ %\item $\sin(\beta)=\frac{b\sin(\alpha)}{a}$ %asin is not uniquely invertible
+ \item $\Delta=\frac{bc}{2}\sin(\alpha)$
+ \end{itemize}
+\end{minipage}
+\hfill
+\begin{minipage}[B]{0.5\linewidth}
+ \centering
+ \begin{tikzpicture}[line cap=round,minimum size=0,x=.7cm,y=0.7cm]
+ \node[circle,inner sep=0] (AA) at (0,0) {$A$};
+ \node[circle,inner sep=0] (BB) at (3,-1) {$B$};
+ \node[circle,inner sep=0] (CC) at (3.666667,1) {$C$};
+
+ \coordinate (A) at (AA.0);
+ \coordinate (B) at (BB.100);
+ \coordinate (C) at (CC.210);
+
+ \pic[draw,angle radius=15,pic text=$\gamma$]{angle = A--C--B};
+ \pic[draw,angle radius=15,pic text=$\beta$]{angle = C--B--A};
+ \pic[draw,angle radius=20,pic text=$\alpha$]{angle = B--A--C};
+
+ \draw (A) to[edge label={$b$},inner sep=1] (C);
+ \draw (A) to[edge label'={$c$},inner sep=1.3] (B);
+ \draw (B) to[edge label'={$a$},inner sep=0.6] (C);
+ \end{tikzpicture}
+\end{minipage}
+\hfill
+\begin{minipage}[T]{0.16\linewidth}
+ $\beta=90^\circ$:
+ \begin{itemize}
+ \item $\sin(\alpha)=\frac{a}{b}$
+ \item $\cos(\alpha)=\frac{c}{b}$
+ \item $\tan(\alpha)=\frac{a}{c}$
+ \end{itemize}
+\end{minipage}
diff --git a/content/graph/2sat.cpp b/content/graph/2sat.cpp
new file mode 100644
index 0000000..75e54e6
--- /dev/null
+++ b/content/graph/2sat.cpp
@@ -0,0 +1,31 @@
+struct sat2 {
+ int n; // + the variables from the SCC code (adj, idx, ...)
+ vector<int> sol;
+
+ sat2(int vars) : n(vars*2), adj(n) {}
+
+ static int var(int i) {return i << 1;} // use this!
+
+ void addImpl(int a, int b) {
+ adj[a].push_back(b);
+ adj[1^b].push_back(1^a);
+ }
+ void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);}
+ void addOr(int a, int b) {addImpl(1^a, b);}
+ void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);}
+ void addTrue(int a) {addImpl(1^a, a);}
+ void addFalse(int a) {addTrue(1^a);}
+ void addAnd(int a, int b) {addTrue(a); addTrue(b);}
+ void addNand(int a, int b) {addOr(1^a, 1^b);}
+
+ bool solve() {
+ scc(); //scc code from above
+ sol.assign(n, -1);
+ for (int i = 0; i < n; i += 2) {
+ if (idx[i] == idx[i + 1]) return false;
+ sol[i] = idx[i] < idx[i + 1];
+ sol[i + 1] = !sol[i];
+ }
+ return true;
+ }
+};
diff --git a/content/graph/LCA_sparse.cpp b/content/graph/LCA_sparse.cpp
new file mode 100644
index 0000000..221b5ed
--- /dev/null
+++ b/content/graph/LCA_sparse.cpp
@@ -0,0 +1,32 @@
+struct LCA {
+ vector<ll> depth;
+ vector<int> visited, first;
+ int idx;
+ SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@
+
+ void init(vector<vector<int>>& adj, int root) {
+ depth.assign(2 * sz(adj), 0);
+ visited.assign(2 * sz(adj), -1);
+ first.assign(sz(adj), 2 * sz(adj));
+ idx = 0;
+ dfs(adj, root);
+ st.init(&depth);
+ }
+
+ void dfs(vector<vector<int>>& adj, int v, ll d=0) {
+ visited[idx] = v, depth[idx] = d;
+ first[v] = min(idx, first[v]), idx++;
+
+ for (int u : adj[v]) {
+ if (first[u] == 2 * sz(adj)) {
+ dfs(adj, u, d + 1);
+ visited[idx] = v, depth[idx] = d, idx++;
+ }}}
+
+ int getLCA(int u, int v) {
+ if (first[u] > first[v]) swap(u, v);
+ return visited[st.queryIdempotent(first[u], first[v] + 1)];
+ }
+
+ ll getDepth(int v) {return depth[first[v]];}
+};
diff --git a/content/graph/TSP.cpp b/content/graph/TSP.cpp
new file mode 100644
index 0000000..6223858
--- /dev/null
+++ b/content/graph/TSP.cpp
@@ -0,0 +1,29 @@
+vector<vector<ll>> dist; // Distance between every two points.
+
+auto TSP() {
+ int n = sz(dist), m = 1 << n;
+ vector<vector<edge>> dp(n, vector<edge>(m, edge{INF, -1}));
+
+ for (int c = 0; c < n; c++)
+ dp[c][m-1].dist = dist[c][0], dp[c][m-1].to = 0;
+
+ for (int v = m - 2; v >= 0; v--) {
+ for (int c = n - 1; c >= 0; c--) {
+ for (int g = 0; g < n; g++) {
+ if (g != c && !((1 << g) & v)) {
+ if ((dp[g][(v | (1 << g))].dist + dist[c][g]) <
+ dp[c][v].dist) {
+ dp[c][v].dist =
+ dp[g][(v | (1 << g))].dist + dist[c][g];
+ dp[c][v].to = g;
+ }}}}}
+ // return dp[0][1]; // length of the tour
+
+ vector<int> tour = {0};
+ int v = 0;
+ while (tour.back() != 0 || sz(tour) == 1)
+ tour.push_back(dp[tour.back()]
+ [(v |= (1 << tour.back()))].to);
+ // Contains node 0 twice: at the first and the last position.
+ return tour;
+}
diff --git a/content/graph/articulationPoints.cpp b/content/graph/articulationPoints.cpp
new file mode 100644
index 0000000..25ff67e
--- /dev/null
+++ b/content/graph/articulationPoints.cpp
@@ -0,0 +1,43 @@
+vector<vector<Edge>> adj;
+vector<int> num;
+int counter, rootCount, root;
+vector<bool> isArt;
+vector<Edge> bridges, st;
+vector<vector<Edge>> bcc;
+
+int dfs(int v, int from = -1) {
+ int me = num[v] = ++counter, top = me;
+ for (Edge& e : adj[v]) {
+ if (e.id == from) continue;
+ if (num[e.to]) {
+ top = min(top, num[e.to]);
+ if (num[e.to] < me) st.push_back(e);
+ } else {
+ if (v == root) rootCount++;
+ int si = sz(st);
+ int up = dfs(e.to, e.id);
+ top = min(top, up);
+ if (up >= me) isArt[v] = true;
+ if (up > me) bridges.push_back(e);
+ if (up <= me) st.push_back(e);
+ if (up == me) {
+ bcc.emplace_back(si + all(st));
+ st.resize(si);
+ }}}
+ return top;
+}
+
+void find() {
+ counter = 0;
+ num.assign(sz(adj), 0);
+ isArt.assign(sz(adj), false);
+ bridges.clear();
+ st.clear();
+ bcc.clear();
+ for (int v = 0; v < sz(adj); v++) {
+ if (!num[v]) {
+ root = v;
+ rootCount = 0;
+ dfs(v);
+ isArt[v] = rootCount > 1;
+}}}
diff --git a/content/graph/bellmannFord.cpp b/content/graph/bellmannFord.cpp
new file mode 100644
index 0000000..09ea1aa
--- /dev/null
+++ b/content/graph/bellmannFord.cpp
@@ -0,0 +1,19 @@
+auto bellmannFord(int n, vector<edge>& edges, int start) {
+ vector<ll> dist(n, INF), prev(n, -1);
+ dist[start] = 0;
+
+ for (int i = 1; i < n; i++) {
+ for (edge& e : edges) {
+ if (dist[e.from] != INF &&
+ dist[e.from] + e.cost < dist[e.to]) {
+ dist[e.to] = dist[e.from] + e.cost;
+ prev[e.to] = e.from;
+ }}}
+
+ for (edge& e : edges) {
+ if (dist[e.from] != INF &&
+ dist[e.from] + e.cost < dist[e.to]) {
+ // Negative cycle found.
+ }}
+ return dist; //return prev?
+}
diff --git a/content/graph/bitonicTSP.cpp b/content/graph/bitonicTSP.cpp
new file mode 100644
index 0000000..6470232
--- /dev/null
+++ b/content/graph/bitonicTSP.cpp
@@ -0,0 +1,31 @@
+vector<vector<double>> dist; // Initialize with distances between points.
+
+auto bitonicTSP() {
+ vector<double> dp(sz(dist), HUGE_VAL);
+ vector<int> pre(sz(dist)); // only needed for the tour
+ dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0;
+ for (unsigned int i = 2; i < sz(dist); i++) {
+ double link = 0;
+ for (int j = i - 2; j >= 0; j--) {
+ link += dist[j + 1][j + 2];
+ double opt = link + dist[j][i] + dp[j + 1] - dist[j][j + 1];
+ if (opt < dp[i]) {
+ dp[i] = opt;
+ pre[i] = j;
+ }}}
+ // return dp.back(); // length of the tour
+
+ int j, n = sz(dist) - 1;
+ vector<int> ut, lt = {n, n - 1};
+ do {
+ j = pre[n];
+ (lt.back() == n ? lt : ut).push_back(j);
+ for (int i = n - 1; i > j + 1; i--) {
+ (lt.back() == i ? lt : ut).push_back(i - 1);
+ }
+ } while(n = j + 1, j > 0);
+ (lt.back() == 1 ? lt : ut).push_back(0);
+ reverse(all(lt));
+ lt.insert(lt.end(), all(ut));
+ return lt; // Contains node 0 twice: at the first and the last position.
+}
diff --git a/content/graph/bitonicTSPsimple.cpp b/content/graph/bitonicTSPsimple.cpp
new file mode 100644
index 0000000..8b6e6c5
--- /dev/null
+++ b/content/graph/bitonicTSPsimple.cpp
@@ -0,0 +1,27 @@
+vector<vector<double>> dist; // Distances between points.
+vector<vector<double>> dp;
+
+double get(int p1, int p2) {
+ int v = max(p1, p2) + 1;
+ if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1];
+ if (dp[p1][p2] >= 0.0) return dp[p1][p2];
+ double tryLR = dist[p1][v] + get(v, p2);
+ double tryRL = dist[p2][v] + get(p1, v);
+ return dp[p1][p2] = min(tryLR, tryRL);
+}
+
+auto bitonicTSP() {
+ dp = vector<vector<double>>(sz(dist),
+ vector<double>(sz(dist), -1));
+ get(0, 0);
+ // return dp[0][0]; // length of the tour
+ vector<int> lr = {0}, rl = {0};
+ for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) {
+ if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) {
+ lr.push_back(v); p1 = v;
+ } else {
+ rl.push_back(v); p2 = v;
+ }}
+ lr.insert(lr.end(), rl.rbegin(), rl.rend());
+ return lr; // Contains node 0 twice: at the first and the last position.
+}
diff --git a/content/graph/blossom.cpp b/content/graph/blossom.cpp
new file mode 100644
index 0000000..7bd494a
--- /dev/null
+++ b/content/graph/blossom.cpp
@@ -0,0 +1,82 @@
+struct GM {
+ vector<vector<int>> adj;
+ // pairs[v] is the vertex matched to v, or n if unmatched
+ vector<int> pairs, first, que;
+ vector<pair<int, int>> label;
+ int head, tail;
+
+ GM(int n) : adj(n), pairs(n + 1, n), first(n + 1, n),
+ que(n), label(n + 1, {-1, -1}) {}
+
+ void rematch(int u, int v) {
+ int t = pairs[u]; pairs[u] = v;
+ if (pairs[t] != u) return;
+ if (label[u].second == -1) {
+ pairs[t] = label[u].first;
+ rematch(pairs[t], t);
+ } else {
+ auto [x, y] = label[u];
+ rematch(x, y);
+ rematch(y, x);
+ }}
+
+ int findFirst(int v) {
+ return label[first[v]].first < 0 ? first[v]
+ : first[v] = findFirst(first[v]);
+ }
+
+ void relabel(int x, int y) {
+ int r = findFirst(x);
+ int s = findFirst(y);
+ if (r == s) return;
+ auto h = label[r] = label[s] = {~x, y};
+ int join;
+ while (true) {
+ if (s != sz(adj)) swap(r, s);
+ r = findFirst(label[pairs[r]].first);
+ if (label[r] == h) {
+ join = r;
+ break;
+ } else {
+ label[r] = h;
+ }}
+ for (int v : {first[x], first[y]}) {
+ for (; v != join; v = first[label[pairs[v]].first]) {
+ label[v] = {x, y};
+ first[v] = join;
+ que[tail++] = v;
+ }}}
+
+ bool augment(int v) {
+ label[v] = {sz(adj), -1};
+ first[v] = sz(adj);
+ head = tail = 0;
+ for (que[tail++] = v; head < tail;) {
+ int x = que[head++];
+ for (int y : adj[x]) {
+ if (pairs[y] == sz(adj) && y != v) {
+ pairs[y] = x;
+ rematch(x, y);
+ return true;
+ } else if (label[y].first >= 0) {
+ relabel(x, y);
+ } else if (label[pairs[y]].first == -1) {
+ label[pairs[y]].first = x;
+ first[pairs[y]] = y;
+ que[tail++] = pairs[y];
+ }}}
+ return false;
+ }
+
+ int match() {
+ int matching = head = tail = 0;
+ for (int v = 0; v < sz(adj); v++) {
+ if (pairs[v] < sz(adj) || !augment(v)) continue;
+ matching++;
+ for (int i = 0; i < tail; i++)
+ label[que[i]] = label[pairs[que[i]]] = {-1, -1};
+ label[sz(adj)] = {-1, -1};
+ }
+ return matching;
+ }
+};
diff --git a/content/graph/bronKerbosch.cpp b/content/graph/bronKerbosch.cpp
new file mode 100644
index 0000000..0cfcc5f
--- /dev/null
+++ b/content/graph/bronKerbosch.cpp
@@ -0,0 +1,24 @@
+using bits = bitset<64>;
+vector<bits> adj, cliques;
+
+void addEdge(int a, int b) {
+ if (a != b) adj[a][b] = adj[b][a] = 1;
+}
+
+void bronKerboschRec(bits R, bits P, bits X) {
+ if (P.none() && X.none()) {
+ cliques.push_back(R);
+ } else {
+ int q = min(P._Find_first(), X._Find_first());
+ bits cands = P & ~adj[q];
+ for (int i = 0; i < sz(adj); i++) if (cands[i]) {
+ R[i] = 1;
+ bronKerboschRec(R, P & adj[i], X & adj[i]);
+ R[i] = P[i] = 0;
+ X[i] = 1;
+}}}
+
+void bronKerbosch() {
+ cliques.clear();
+ bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {});
+}
diff --git a/content/graph/centroid.cpp b/content/graph/centroid.cpp
new file mode 100644
index 0000000..820945b
--- /dev/null
+++ b/content/graph/centroid.cpp
@@ -0,0 +1,21 @@
+vector<int> s;
+void dfs_sz(int v, int from = -1) {
+ s[v] = 1;
+ for (int u : adj[v]) if (u != from) {
+ dfs_sz(u, v);
+ s[v] += s[u];
+}}
+
+pair<int, int> dfs_cent(int v, int from, int n) {
+ for (int u : adj[v]) if (u != from) {
+ if (2 * s[u] == n) return {v, u};
+ if (2 * s[u] > n) return dfs_cent(u, v, n);
+ }
+ return {v, -1};
+}
+
+pair<int, int> find_centroid(int root = 0) {
+ s.resize(sz(adj));
+ dfs_sz(root);
+ return dfs_cent(root, -1, s[root]);
+}
diff --git a/content/graph/connect.cpp b/content/graph/connect.cpp
new file mode 100644
index 0000000..ffcd6c2
--- /dev/null
+++ b/content/graph/connect.cpp
@@ -0,0 +1,31 @@
+struct connect {
+ int n;
+ vector<pair<int, int>> edges;
+ LCT lct; // min LCT @\sourceref{datastructures/LCT.cpp}@, no updates required
+
+ connect(int n, int m) : n(n), edges(m), lct(n+m) {}
+
+ bool connected(int u, int v) {
+ return lct.connected(&lct.nodes[u], &lct.nodes[v]);
+ }
+
+ void addEdge(int u, int v, int id) {
+ lct.nodes[id + n] = LCT::Node(id + n, id + n);
+ edges[id] = {u, v};
+ if (connected(u, v)) {
+ int old = lct.query(&lct.nodes[u], &lct.nodes[v]);
+ if (old < id) eraseEdge(old);
+ }
+ if (!connected(u, v)) {
+ lct.link(&lct.nodes[u], &lct.nodes[id + n]);
+ lct.link(&lct.nodes[v], &lct.nodes[id + n]);
+ }}
+
+ void eraseEdge(ll id) {
+ if (connected(edges[id].first, edges[id].second) &&
+ lct.query(&lct.nodes[edges[id].first],
+ &lct.nodes[edges[id].second]) == id) {
+ lct.cut(&lct.nodes[edges[id].first], &lct.nodes[id + n]);
+ lct.cut(&lct.nodes[edges[id].second], &lct.nodes[id + n]);
+ }}
+};
diff --git a/content/graph/cycleCounting.cpp b/content/graph/cycleCounting.cpp
new file mode 100644
index 0000000..6a299ee
--- /dev/null
+++ b/content/graph/cycleCounting.cpp
@@ -0,0 +1,64 @@
+constexpr int maxEdges = 128;
+using cycle = bitset<maxEdges>;
+struct cycles {
+ vector<vector<pair<int, int>>> adj;
+ vector<bool> seen;
+ vector<cycle> paths, base;
+ vector<pair<int, int>> edges;
+
+ cycles(int n) : adj(n), seen(n), paths(n) {}
+
+ void addEdge(int u, int v) {
+ adj[u].push_back({v, sz(edges)});
+ adj[v].push_back({u, sz(edges)});
+ edges.push_back({u, v});
+ }
+
+ void addBase(cycle cur) {
+ for (cycle o : base) {
+ o ^= cur;
+ if (o._Find_first() > cur._Find_first()) cur = o;
+ }
+ if (cur.any()) base.push_back(cur);
+ }
+
+ void findBase(int v, int from = -1, cycle cur = {}) {
+ if (from < 0 && seen[v]) return;
+ if (seen[v]) {
+ addBase(cur ^ paths[v]);
+ } else {
+ seen[v] = true;
+ paths[v] = cur;
+ for (auto [u, id] : adj[v]) {
+ if (u == from) continue;
+ cur[id].flip();
+ findBase(u, v, cur);
+ cur[id].flip();
+ }}}
+
+ bool isCycle(cycle cur) {//cycle must be constructed from base
+ if (cur.none()) return false;
+ init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@
+ for (int i = 0; i < sz(edges); i++) {
+ if (cur[i]) {
+ cur[i] = false;
+ if (findSet(edges[i].first) ==
+ findSet(edges[i].second)) break;
+ unionSets(edges[i].first, edges[i].second);
+ }}
+ return cur.none();
+ }
+
+ int count() {
+ for (int i = 0; i < sz(adj); i++) findBase(i);
+ assert(sz(base) < 30);
+ int res = 0;
+ for (int i = 1; i < (1 << sz(base)); i++) {
+ cycle cur;
+ for (int j = 0; j < sz(base); j++)
+ if (((i >> j) & 1) != 0) cur ^= base[j];
+ if (isCycle(cur)) res++;
+ }
+ return res;
+ }
+};
diff --git a/content/graph/dfs.tex b/content/graph/dfs.tex
new file mode 100644
index 0000000..1e6705f
--- /dev/null
+++ b/content/graph/dfs.tex
@@ -0,0 +1,16 @@
+\begin{expandtable}
+\begin{tabularx}{\linewidth}{|X|XIXIX|}
+ \hline
+ Edge type $(v, w)$ & \code{dfs[v] < dfs[w]} & \code{fin[v] > fin[w]} & \code{seen[w]} \\
+ %$(v, w)$ & \code{dfs[w]} & \code{fin[w]} & \\
+ \hline
+ in-tree & \code{true} & \code{true} & \code{false} \\
+ \grayhline
+ forward & \code{true} & \code{true} & \code{true} \\
+ \grayhline
+ backward & \code{false} & \code{false} & \code{true} \\
+ \grayhline
+ cross & \code{false} & \code{true} & \code{true} \\
+ \hline
+\end{tabularx}
+\end{expandtable}
diff --git a/content/graph/dijkstra.cpp b/content/graph/dijkstra.cpp
new file mode 100644
index 0000000..61c636d
--- /dev/null
+++ b/content/graph/dijkstra.cpp
@@ -0,0 +1,21 @@
+using path = pair<ll, int>; //dist, destination
+
+auto dijkstra(const vector<vector<path>>& adj, int start) {
+ priority_queue<path, vector<path>, greater<path>> pq;
+ vector<ll> dist(sz(adj), INF);
+ vector<int> prev(sz(adj), -1);
+ dist[start] = 0; pq.emplace(0, start);
+
+ while (!pq.empty()) {
+ auto [dv, v] = pq.top(); pq.pop();
+ if (dv > dist[v]) continue; // IMPORTANT!
+
+ for (auto [du, u] : adj[v]) {
+ ll newDist = dv + du;
+ if (newDist < dist[u]) {
+ dist[u] = newDist;
+ prev[u] = v;
+ pq.emplace(dist[u], u);
+ }}}
+ return dist; //return prev;
+}
diff --git a/content/graph/dinicScaling.cpp b/content/graph/dinicScaling.cpp
new file mode 100644
index 0000000..f4e833a
--- /dev/null
+++ b/content/graph/dinicScaling.cpp
@@ -0,0 +1,51 @@
+struct Edge {
+ int to, rev;
+ ll f, c;
+};
+
+vector<vector<Edge>> adj;
+int s, t;
+vector<int> pt, dist;
+
+void addEdge(int u, int v, ll c) {
+ adj[u].push_back({v, (int)sz(adj[v]), 0, c});
+ adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0});
+}
+
+bool bfs(ll lim) {
+ dist.assign(sz(adj), -1);
+ dist[s] = 0;
+ queue<int> q({s});
+ while (!q.empty() && dist[t] < 0) {
+ int v = q.front(); q.pop();
+ for (Edge& e : adj[v]) {
+ if (dist[e.to] < 0 && e.c - e.f >= lim) {
+ dist[e.to] = dist[v] + 1;
+ q.push(e.to);
+ }}}
+ return dist[t] >= 0;
+}
+
+bool dfs(int v, ll flow) {
+ if (v == t) return true;
+ for (; pt[v] < sz(adj[v]); pt[v]++) {
+ Edge& e = adj[v][pt[v]];
+ if (dist[e.to] != dist[v] + 1) continue;
+ if (e.c - e.f >= flow && dfs(e.to, flow)) {
+ e.f += flow;
+ adj[e.to][e.rev].f -= flow;
+ return true;
+ }}
+ return false;
+}
+
+ll maxFlow(int source, int target) {
+ s = source, t = target;
+ ll flow = 0;
+ for (ll lim = (1LL << 62); lim >= 1; lim /= 2) {
+ while (bfs(lim)) {
+ pt.assign(sz(adj), 0);
+ while (dfs(s, lim)) flow += lim;
+ }}
+ return flow;
+}
diff --git a/content/graph/euler.cpp b/content/graph/euler.cpp
new file mode 100644
index 0000000..a5ea192
--- /dev/null
+++ b/content/graph/euler.cpp
@@ -0,0 +1,23 @@
+vector<vector<int>> idx;
+vector<int> to, validIdx, cycle;
+vector<bool> used;
+
+void addEdge(int u, int v) {
+ idx[u].push_back(sz(to));
+ to.push_back(v);
+ used.push_back(false);
+ idx[v].push_back(sz(to)); // for undirected graphs
+ to.push_back(u);
+ used.push_back(false);
+}
+
+void euler(int v) { // initialize idx and validIdx
+ for (;validIdx[v] < sz(idx[v]); validIdx[v]++) {
+ if (!used[idx[v][validIdx[v]]]) {
+ int u = to[idx[v][validIdx[v]]];
+ used[idx[v][validIdx[v]]] = true;
+ used[idx[v][validIdx[v]] ^ 1] = true; // for undirected graphs
+ euler(u);
+ }}
+ cycle.push_back(v); // Cycle in reverse order.
+}
diff --git a/content/graph/floydWarshall.cpp b/content/graph/floydWarshall.cpp
new file mode 100644
index 0000000..df096c2
--- /dev/null
+++ b/content/graph/floydWarshall.cpp
@@ -0,0 +1,27 @@
+vector<vector<ll>> dist; // Distance between every two vertices.
+vector<vector<int>> next;
+
+void floydWarshall() {
+ next.assign(sz(dist), vector<int>(sz(dist), -1));
+ for (int i = 0; i < sz(dist); i++) {
+ for (int j = 0; j < sz(dist); j++) {
+ if (dist[i][j] < INF) {
+ next[i][j] = j;
+ }}}
+
+ for (int k = 0; k < sz(dist); k++) {
+ for (int i = 0; i < sz(dist); i++) {
+ for (int j = 0; j < sz(dist); j++) {
+ // only needed if dist can be negative
+ if (dist[i][k] == INF || dist[k][j] == INF) continue;
+ if (dist[i][j] > dist[i][k] + dist[k][j]) {
+ dist[i][j] = dist[i][k] + dist[k][j];
+ next[i][j] = next[i][k];
+}}}}}
+
+vector<int> getPath(int u, int v) {
+ if (next[u][v] < 0) return {};
+ vector<int> path = {u};
+ while (u != v) path.push_back(u = next[u][v]);
+ return path; // path u -> v
+}
diff --git a/content/graph/graph.tex b/content/graph/graph.tex
new file mode 100644
index 0000000..831f4e5
--- /dev/null
+++ b/content/graph/graph.tex
@@ -0,0 +1,269 @@
+\section{Graphs}
+
+\begin{algorithm}{Kruskal}
+ \begin{methods}[ll]
+ computes the minimum spanning tree & \runtime{\abs{E}\cdot\log(\abs{E})} \\
+ \end{methods}
+ \sourcecode{graph/kruskal.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Minimum Spanning Trees}
+ \paragraph{Cut property}
+ For every cut $C$ of the graph:
+ if an edge $e$ is strictly lighter than all other edges crossing the cut, then $e$ belongs to every minimum spanning tree.
+ ($\Rightarrow$ A lightest edge of a cut can always be used in a minimum spanning tree.)
+
+ \paragraph{Cycle property}
+ For every cycle $K$ in the graph:
+ an edge that is strictly heavier than all other edges of the cycle is not part of any minimum spanning tree.
+\end{algorithm}
+
+\begin{algorithm}{Heavy-Light Decomposition}
+ \begin{methods}
+ \method{for\_intervals}{calls \code{f} on each interval of the path from $u$ to $v$}{\log(\abs{V})}
+ \end{methods}
+ \textbf{Important:} intervals are half-open.
+
+ The subtree below vertex $v$ is the interval $[\text{\code{in[v]}},~\text{\code{out[v]}})$.
+ \sourcecode{graph/hld.cpp}
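+ A minimal usage sketch (assumes some range structure \code{seg} built over the \code{in}-order; the name is illustrative):
+\begin{lstlisting}
+ll sum = 0;
+for_intervals(u, v, [&](int l, int r) { // half-open [l, r)
+ sum += seg.query(l, r);
+});
+\end{lstlisting}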
+\end{algorithm}
+
+\begin{algorithm}{Lowest Common Ancestor}
+ \begin{methods}
+ \method{init}{builds the DFS tree over $g$}{\abs{V}\*\log(\abs{V})}
+ \method{getLCA}{finds the LCA}{1}
+ \method{getDepth}{returns the distance to the root in the DFS tree}{1}
+ \end{methods}
+ \sourcecode{graph/LCA_sparse.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Centroids}
+ \begin{methods}
+ \method{find\_centroid}{finds all centroids of the tree (at most 2)}{\abs{V}}
+ \end{methods}
+ \sourcecode{graph/centroid.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Euler Tours}
+ \begin{methods}
+ \method{euler}{computes the circuit}{\abs{V}+\abs{E}}
+ \end{methods}
+ \sourcecode{graph/euler.cpp}
+ \begin{itemize}
+ \item A circuit exists iff every vertex has even degree (undirected),\\ resp. in-degree equals out-degree at every vertex (directed).
+ \item A path exists iff exactly $\{0, 2\}$ vertices have odd degree (undirected),\\ resp. in-degree equals out-degree at every vertex, or one vertex has one extra outgoing edge (start vertex) and one vertex has one extra incoming edge (end vertex) (directed).
+ \item \textbf{Depending on the problem statement, check how a disconnected graph should be interpreted.}
+ \item If a specific ordering is required or runtime is negligible, an implementation with a \code{vector<set<int>> adj} is easier.
+ \item \textbf{Important:} the algorithm does not fail if no Euler circuit exists.
+ The existence has to be checked separately.
+ \end{itemize}
+\end{algorithm}
+
+\begin{algorithm}{Tree Isomorphism}
+ \begin{methods}
+ \method{treeLabel}{computes a canonical label for a tree}{\abs{V}\*\log(\abs{V})}
+ \end{methods}
+ \sourcecode{graph/treeIsomorphism.cpp}
+\end{algorithm}
+
+\subsection{Shortest Paths}
+
+\subsubsection{\textsc{Bellman-Ford} Algorithm}
+\method{bellmannFord}{finds shortest paths or negative cycles}{\abs{V}\*\abs{E}}
+\sourcecode{graph/bellmannFord.cpp}
+
+\subsubsection{\textsc{Dijkstra}'s Algorithm}
+\method{dijkstra}{shortest paths in graphs without negative edges}{\abs{E}\*\log(\abs{V})}
+\sourcecode{graph/dijkstra.cpp}
+
+\subsubsection{\textsc{Floyd-Warshall} Algorithm}
+\method{floydWarshall}{finds shortest paths or negative cycles}{\abs{V}^3}
+\begin{itemize}
+ \item \code{dist[i][i] = 0, dist[i][j] = edge\{i, j\}.weight} or \code{INF}
+ \item \code{i} lies on a negative cycle $\Leftrightarrow$ \code{dist[i][i] < 0}.
+\end{itemize}
+\sourcecode{graph/floydWarshall.cpp}
+
+\subsubsection{Matrix Algorithm}
+Let $d_{i\smash{j}}$ be the distance matrix of $G$ \textcolor{gray}{(with $d_{ii} = 0$)}; then $d_{i\smash{j}}^k$ gives the shortest distance from $i$ to $j$ using at most $k$ edges, with the product $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \min\{a_{ik} + b_{k\smash{j}}\}$
+
+
+Let $a_{ij}$ be the adjacency matrix of $G$ \textcolor{gray}{(with $a_{ii} = 1$)}; then $a_{i\smash{j}}^k$ gives the number of walks from $i$ to $j$ of length exactly \textcolor{gray}{(at most)} $k$, with the product $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \sum a_{ik} \cdot b_{k\smash{j}}$
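+
+A minimal sketch of the $(\min,+)$ product (assuming \code{INF} entries for missing edges):
+\begin{lstlisting}
+vector<vector<ll>> minPlus(vector<vector<ll>>& a,
+                           vector<vector<ll>>& b) {
+ int n = sz(a);
+ vector<vector<ll>> c(n, vector<ll>(n, INF));
+ for (int i = 0; i < n; i++) for (int k = 0; k < n; k++) {
+  if (a[i][k] == INF) continue;
+  for (int j = 0; j < n; j++) if (b[k][j] < INF)
+   c[i][j] = min(c[i][j], a[i][k] + b[k][j]);
+ }
+ return c;
+}
+\end{lstlisting}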
+
+\begin{algorithm}{Dynamic Connectivity}
+ \begin{methods}
+ \method{Constructor}{creates the tree ($n$ vertices, $m$ updates)}{n+m}
+ \method{addEdge}{inserts an edge, \code{id} = deletion time}{\log(n)}
+ \method{eraseEdge}{removes edge \code{id}}{\log(n)}
+ \end{methods}
+ \sourcecode{graph/connect.cpp}
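+ Usage sketch (edges are identified by their index \code{id}):
+\begin{lstlisting}
+connect con(n, m);
+con.addEdge(u, v, id); // id = time at which the edge is deleted
+bool c = con.connected(u, v);
+con.eraseEdge(id);
+\end{lstlisting}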
+\end{algorithm}
+
+\begin{algorithm}{Erd\H{o}s-Gallai}
+ Let $d_1 \geq \cdots \geq d_{n}$. A graph $G$ with degree sequence $d$ exists iff $\sum\limits_{i=1}^{n} d_i$ is even and for all $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$
+ \begin{methods}
+ \method{havelHakimi}{constructs such a graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})}
+ \end{methods}
+ \sourcecode{graph/havelHakimi.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Strongly Connected Components (\textsc{Tarjan})}
+ \begin{methods}
+ \method{scc}{computes the strongly connected components}{\abs{V}+\abs{E}}
+ \end{methods}
+ \sourcecode{graph/scc.cpp}
+\end{algorithm}
+
+\begin{algorithm}{DFS}
+ \input{graph/dfs}
+\end{algorithm}
+
+\begin{algorithm}{Articulation Points, Bridges and BCCs}
+ \begin{methods}
+ \method{find}{computes articulation points, bridges and BCCs}{\abs{V}+\abs{E}}
+ \end{methods}
+ \textbf{Important:} isolated vertices and bridges are not BCCs.
+ \sourcecode{graph/articulationPoints.cpp}
+\end{algorithm}
+\vfill\null\columnbreak
+
+\begin{algorithm}{2-SAT}
+ \sourcecode{graph/2sat.cpp}
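+ Usage sketch: the literal $x_i$ is \code{var(i)}, its negation has the lowest bit flipped:
+\begin{lstlisting}
+sat2 sat(n);
+sat.addOr(sat2::var(0), sat2::var(3) ^ 1); // clause (x0 or !x3)
+if (sat.solve()) { /* sat.sol[sat2::var(i)] = value of x_i */ }
+\end{lstlisting}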
+\end{algorithm}
+
+\begin{algorithm}{Maximal Cliques}
+ \begin{methods}
+ \method{bronKerbosch}{computes all maximal cliques}{3^\frac{n}{3}}
+ \method{addEdge}{inserts an \textbf{undirected} edge}{1}
+ \end{methods}
+ \sourcecode{graph/bronKerbosch.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Cycle Counting}
+ \begin{methods}
+ \method{findBase}{computes a cycle basis}{\abs{V}\cdot\abs{E}}
+ \method{count}{counts the cycles}{2^{\abs{\mathit{base}}}}
+ \end{methods}
+ \begin{itemize}
+ \item every cycle is the xor of entries in \code{base}.
+ \end{itemize}
+ \sourcecode{graph/cycleCounting.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Size of the Maximum Matching}
+ Error probability: $\left(\frac{m}{MOD}\right)^I$
+ \sourcecode{graph/matching.cpp}
+\end{algorithm}
+
+\begin{algorithm}{General Maximum Matching}
+ \begin{methods}
+ \method{match}{computes a maximum matching in general graphs}{\abs{E}\*\abs{V}\*\log(\abs{V})}
+ \end{methods}
+ \sourcecode{graph/blossom.cpp}
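+ Usage sketch (insert both directions of every edge):
+\begin{lstlisting}
+GM gm(n);
+gm.adj[u].push_back(v);
+gm.adj[v].push_back(u);
+int size = gm.match();
+\end{lstlisting}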
+\end{algorithm}
+
+\begin{algorithm}{Rerooting Template}
+ \sourcecode{graph/reroot.cpp}
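+ Example instantiation (a sketch): the maximum distance (in edges) from every vertex to any other vertex:
+\begin{lstlisting}
+T E() {return 0;}
+T takeChild(T x, int e) {return x + 1;}
+T comb(T x, T y) {return max(x, y);}
+T fin(T x, int v) {return x;}
+\end{lstlisting}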
+\end{algorithm}
+
+\begin{algorithm}{Virtual Trees}
+ \sourcecode{graph/virtualTree.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Maximum Cardinality Bipartite Matching}
+ \label{kuhn}
+ \begin{methods}
+ \method{kuhn}{computes the matching}{\abs{V}\*\min(ans^2, \abs{E})}
+ \end{methods}
+ \begin{itemize}
+ \item the first [0..l) vertices in \code{adj} are the left side of the graph
+ \end{itemize}
+ \sourcecode{graph/maxCarBiMatch.cpp}
+ \begin{methods}
+ \method{hopcroft\_karp}{computes the matching}{\sqrt{\abs{V}}\*\abs{E}}
+ \end{methods}
+ \sourcecode{graph/hopcroftKarp.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Global Mincut}
+ \begin{methods}
+ \method{stoer\_wagner}{computes the global mincut}{\abs{V}\abs{E}+\abs{V}^2\*\log(\abs{E})}
+ \method{merge(a,b)}{merges vertex $b$ into vertex $a$}{\abs{E}}
+ \end{methods}
+ \textbf{Tip:} reconstruct the cut with \code{unionFind} for the partition or a \code{vector<bool>} over the edge ids in the cut.
+ \sourcecode{graph/stoerWagner.cpp}
+\end{algorithm}
+
+\subsection{Max-Flow}
+
+\optional{
+\subsubsection{Push Relabel}
+\begin{methods}
+ \method{maxFlow}{good for very dense graphs}{\abs{V}^2\*\sqrt{\abs{E}}}
+ \method{addEdge}{inserts a \textbf{directed} edge}{1}
+\end{methods}
+\sourcecode{graph/pushRelabel.cpp}
+}
+
+\begin{algorithm}{Min-Cost-Max-Flow}
+ \begin{methods}
+ \method{mincostflow}{computes the flow}{\abs{V}^2\cdot\abs{E}^2}
+ \end{methods}
+ \sourcecode{graph/minCostMaxFlow.cpp}
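+ Usage sketch (names illustrative):
+\begin{lstlisting}
+MinCostFlow f(n, s, t);
+f.addEdge(u, v, cap, cost); // directed edge
+f.mincostflow(); // result in f.maxflow and f.mincost
+\end{lstlisting}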
+\end{algorithm}
+
+\subsubsection{Dinic's Algorithm with Capacity Scaling}
+\begin{methods}
+ \method{maxFlow}{roughly twice as fast as Ford-Fulkerson}{\abs{V}^2\cdot\abs{E}}
+ \method{addEdge}{inserts a \textbf{directed} edge}{1}
+\end{methods}
+\sourcecode{graph/dinicScaling.cpp}
+\vfill\null
+\columnbreak
+
+\optional{
+\subsubsection{Applications}
+\begin{itemize}
+ \item \textbf{Maximum Edge Disjoint Paths}\newline
+ Find the maximum number of paths from $s$ to $t$ that share no edge.
+ \begin{enumerate}
+ \item Set $s$ as source, $t$ as sink and the capacity of every edge to 1.
+ \item The maximum flow equals the number of pairwise edge-disjoint paths.
+ \end{enumerate}
+ \item \textbf{Maximum Independent Paths}\newline
+ Find the maximum number of paths from $s$ to $t$ that share no vertex (see the sketch below).
+ \begin{enumerate}
+ \item Set $s$ as source, $t$ as sink and the capacity of every edge \emph{and of every vertex} to 1.
+ \item The maximum flow equals the number of pairwise vertex-disjoint paths.
+ \end{enumerate}
+ \item \textbf{Min-Cut}\newline
+ The maximum flow equals the minimum cut.
+ With source $s$ and sink $t$, partition into $S$ and $T$.
+ $S$ consists of all vertices reachable from $s$ in the residual graph (mind the reverse edges).
+\end{itemize}
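+A minimal sketch of the vertex-capacity trick (vertex splitting); it assumes the \code{addEdge}/\code{maxFlow} from Dinic above:
+\begin{lstlisting}
+// adj needs 2*n nodes: v_in = v, v_out = v + n
+for (int v = 0; v < n; v++) addEdge(v, v + n, 1);
+// every original edge (u, v) becomes
+addEdge(u + n, v, 1);
+// answer: maxFlow(s + n, t)
+\end{lstlisting}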
+}
+
+\begin{algorithm}{Maximum Weight Bipartite Matching}
+ \begin{methods}
+ \method{match}{computes the matching}{\abs{V}^3}
+ \end{methods}
+ \sourcecode{graph/maxWeightBipartiteMatching.cpp}
+\end{algorithm}
+\vfill\null
+\columnbreak
+
+
+\begin{algorithm}[optional]{TSP}
+ \begin{methods}
+ \method{TSP}{computes a tour}{n^2\*2^n}
+ \end{methods}
+ \sourcecode{graph/TSP.cpp}
+\end{algorithm}
+
+\begin{algorithm}[optional]{Bitonic TSP}
+ \begin{methods}
+ \method{bitonicTSP}{computes a bitonic tour}{n^2}
+ \end{methods}
+ \sourcecode{graph/bitonicTSPsimple.cpp}
+\end{algorithm}
+
diff --git a/content/graph/havelHakimi.cpp b/content/graph/havelHakimi.cpp
new file mode 100644
index 0000000..ac4d67d
--- /dev/null
+++ b/content/graph/havelHakimi.cpp
@@ -0,0 +1,18 @@
+vector<vector<int>> havelHakimi(const vector<int>& deg) {
+ priority_queue<pair<int, int>> pq;
+ for (int i = 0; i < sz(deg); i++) {
+ if (deg[i] > 0) pq.push({deg[i], i});
+ }
+ vector<vector<int>> adj(sz(deg));
+ while (!pq.empty()) {
+ auto [degV, v] = pq.top(); pq.pop();
+ if (sz(pq) < degV) return {}; //impossible
+ vector<pair<int, int>> todo(degV);
+ for (auto& e : todo) e = pq.top(), pq.pop();
+ for (auto [degU, u] : todo) {
+ adj[v].push_back(u);
+ adj[u].push_back(v);
+ if (degU > 1) pq.push({degU - 1, u});
+ }}
+ return adj;
+}
diff --git a/content/graph/hld.cpp b/content/graph/hld.cpp
new file mode 100644
index 0000000..65d3f5c
--- /dev/null
+++ b/content/graph/hld.cpp
@@ -0,0 +1,44 @@
+vector<vector<int>> adj;
+vector<int> sz, in, out, nxt, par;
+int counter;
+
+void dfs_sz(int v = 0, int from = -1) {
+ for (auto& u : adj[v]) if (u != from) {
+ dfs_sz(u, v);
+ sz[v] += sz[u];
+ if (adj[v][0] == from || sz[u] > sz[adj[v][0]]) {
+ swap(u, adj[v][0]); //changes adj!
+}}}
+
+void dfs_hld(int v = 0, int from = -1) {
+ par[v] = from;
+ in[v] = counter++;
+ for (int u : adj[v]) if (u != from) {
+ nxt[u] = (u == adj[v][0]) ? nxt[v] : u;
+ dfs_hld(u, v);
+ }
+ out[v] = counter;
+}
+
+void init(int root = 0) {
+ int n = sz(adj);
+ sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1);
+ in.resize(n), out.resize(n);
+ counter = 0;
+ dfs_sz(root);
+ dfs_hld(root);
+}
+
+template<typename F>
+void for_intervals(int u, int v, F&& f) {
+ for (;; v = par[nxt[v]]) {
+ if (in[v] < in[u]) swap(u, v);
+ f(max(in[u], in[nxt[v]]), in[v] + 1);
+ if (in[nxt[v]] <= in[u]) return;
+}}
+
+int get_lca(int u, int v) {
+ for (;; v = par[nxt[v]]) {
+ if (in[v] < in[u]) swap(u, v);
+ if (in[nxt[v]] <= in[u]) return u;
+}}
diff --git a/content/graph/hopcroftKarp.cpp b/content/graph/hopcroftKarp.cpp
new file mode 100644
index 0000000..c1f5d1c
--- /dev/null
+++ b/content/graph/hopcroftKarp.cpp
@@ -0,0 +1,47 @@
+vector<vector<int>> adj;
+// pairs[v] is the matched vertex or -1
+vector<int> pairs, dist, ptr;
+
+bool bfs(int l) {
+ queue<int> q;
+ for(int v = 0; v < l; v++) {
+ if (pairs[v] < 0) {dist[v] = 0; q.push(v);}
+ else dist[v] = -1;
+ }
+ bool exist = false;
+ while(!q.empty()) {
+ int v = q.front(); q.pop();
+ for (int u : adj[v]) {
+ if (pairs[u] < 0) {exist = true; continue;}
+ if (dist[pairs[u]] < 0) {
+ dist[pairs[u]] = dist[v] + 1;
+ q.push(pairs[u]);
+ }}}
+ return exist;
+}
+
+bool dfs(int v) {
+ for (; ptr[v] < sz(adj[v]); ptr[v]++) {
+ int u = adj[v][ptr[v]];
+ if (pairs[u] < 0 ||
+ (dist[pairs[u]] > dist[v] && dfs(pairs[u]))) {
+ pairs[u] = v; pairs[v] = u;
+ return true;
+ }}
+ return false;
+}
+
+int hopcroft_karp(int l) { // l = #vertices on the left
+ int ans = 0;
+ pairs.assign(sz(adj), -1);
+ dist.resize(l);
+ // Greedy matching, optional speedup.
+ for (int v = 0; v < l; v++) for (int u : adj[v])
+ if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;}
+ while(bfs(l)) {
+ ptr.assign(l, 0);
+ for(int v = 0; v < l; v++) {
+ if (pairs[v] < 0) ans += dfs(v);
+ }}
+ return ans;
+}
diff --git a/content/graph/kruskal.cpp b/content/graph/kruskal.cpp
new file mode 100644
index 0000000..987d30b
--- /dev/null
+++ b/content/graph/kruskal.cpp
@@ -0,0 +1,9 @@
+sort(all(edges));
+vector<Edge> mst;
+ll cost = 0;
+for (Edge& e : edges) {
+ if (findSet(e.from) != findSet(e.to)) {
+ unionSets(e.from, e.to);
+ mst.push_back(e);
+ cost += e.cost;
+}}
diff --git a/content/graph/matching.cpp b/content/graph/matching.cpp
new file mode 100644
index 0000000..dcaea8c
--- /dev/null
+++ b/content/graph/matching.cpp
@@ -0,0 +1,23 @@
+constexpr int MOD=1'000'000'007, I=10;
+vector<vector<ll>> adj, mat;
+
+int max_matching() {
+ int ans = 0;
+ mat.assign(sz(adj), {});
+ for (int _ = 0; _ < I; _++) {
+ for (int v = 0; v < sz(adj); v++) {
+ mat[v].assign(sz(adj), 0);
+ for (int u : adj[v]) {
+ if (u < v) {
+ mat[v][u] = rand() % (MOD - 1) + 1;
+ mat[u][v] = MOD - mat[v][u];
+ }}}
+ gauss(sz(adj), MOD); //linear system @\sourceref{math/lgsFp.cpp}@
+ int rank = 0;
+ for (auto& row : mat) {
+ if (*max_element(all(row)) != 0) rank++;
+ }
+ ans = max(ans, rank / 2);
+ }
+ return ans;
+}
diff --git a/content/graph/maxCarBiMatch.cpp b/content/graph/maxCarBiMatch.cpp
new file mode 100644
index 0000000..e928387
--- /dev/null
+++ b/content/graph/maxCarBiMatch.cpp
@@ -0,0 +1,25 @@
+vector<vector<int>> adj;
+vector<int> pairs; // The matched vertex or -1.
+vector<bool> visited;
+
+bool dfs(int v) {
+ if (visited[v]) return false;
+ visited[v] = true;
+ for (int u : adj[v]) if (pairs[u] < 0 || dfs(pairs[u])) {
+ pairs[u] = v; pairs[v] = u; return true;
+ }
+ return false;
+}
+
+int kuhn(int l) { // l = #vertices on the left.
+ pairs.assign(sz(adj), -1);
+ int ans = 0;
+ // Greedy matching. Optional speedup.
+ for (int v = 0; v < l; v++) for (int u : adj[v])
+ if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;}
+ for (int v = 0; v < l; v++) if (pairs[v] < 0) {
+ visited.assign(l, false);
+ ans += dfs(v);
+ }
+ return ans; // Size of the matching.
+}
diff --git a/content/graph/maxWeightBipartiteMatching.cpp b/content/graph/maxWeightBipartiteMatching.cpp
new file mode 100644
index 0000000..a2b0a80
--- /dev/null
+++ b/content/graph/maxWeightBipartiteMatching.cpp
@@ -0,0 +1,50 @@
+double costs[N_LEFT][N_RIGHT];
+
+// Requires l <= r! (otherwise infinite loop)
+double match(int l, int r) {
+ vector<double> lx(l), ly(r);
+ //xy is matching from l->r, yx from r->l, or -1
+ vector<int> xy(l, -1), yx(r, -1);
+ vector<pair<double, int>> slack(r);
+
+ for (int x = 0; x < l; x++)
+ lx[x] = *max_element(costs[x], costs[x] + r);
+ for (int root = 0; root < l; root++) {
+ vector<int> aug(r, -1);
+ vector<bool> s(l);
+ s[root] = true;
+ for (int y = 0; y < r; y++) {
+ slack[y] = {lx[root] + ly[y] - costs[root][y], root};
+ }
+ int y = -1;
+ while (true) {
+ double delta = INF;
+ int x = -1;
+ for (int yy = 0; yy < r; yy++) {
+ if (aug[yy] < 0 && slack[yy].first < delta) {
+ tie(delta, x) = slack[yy];
+ y = yy;
+ }}
+ if (delta > 0) {
+ for (int x = 0; x < l; x++) if (s[x]) lx[x] -= delta;
+ for (int y = 0; y < r; y++) {
+ if (aug[y] >= 0) ly[y] += delta;
+ else slack[y].first -= delta;
+ }}
+ aug[y] = x;
+ x = yx[y];
+ if (x < 0) break;
+ s[x] = true;
+ for (int y = 0; y < r; y++) {
+ if (aug[y] < 0) {
+ double alt = lx[x] + ly[y] - costs[x][y];
+ if (slack[y].first > alt) {
+ slack[y] = {alt, x};
+ }}}}
+ while (y >= 0) {
+ yx[y] = aug[y];
+ swap(y, xy[aug[y]]);
+ }}
+ return accumulate(all(lx), 0.0) +
+ accumulate(all(ly), 0.0); // value of the matching
+}
diff --git a/content/graph/minCostMaxFlow.cpp b/content/graph/minCostMaxFlow.cpp
new file mode 100644
index 0000000..14a222c
--- /dev/null
+++ b/content/graph/minCostMaxFlow.cpp
@@ -0,0 +1,66 @@
+constexpr ll INF = 1LL << 60; // Larger than the maximum flow.
+struct MinCostFlow {
+ struct edge {
+ int to;
+ ll f, cost;
+ };
+ vector<edge> edges;
+ vector<vector<int>> adj;
+ vector<int> pref, con;
+ vector<ll> dist;
+ const int s, t;
+ ll maxflow, mincost;
+
+ MinCostFlow(int n, int source, int target) :
+ adj(n), s(source), t(target) {};
+
+ void addEdge(int u, int v, ll c, ll cost) {
+ adj[u].push_back(sz(edges));
+ edges.push_back({v, c, cost});
+ adj[v].push_back(sz(edges));
+ edges.push_back({u, 0, -cost});
+ }
+
+ bool SPFA() {
+ pref.assign(sz(adj), -1);
+ dist.assign(sz(adj), INF);
+ vector<bool> inqueue(sz(adj));
+ queue<int> queue;
+ dist[s] = 0;
+ queue.push(s);
+ pref[s] = s;
+ inqueue[s] = true;
+ while (!queue.empty()) {
+ int cur = queue.front(); queue.pop();
+ inqueue[cur] = false;
+ for (int id : adj[cur]) {
+ int to = edges[id].to;
+ if (edges[id].f > 0 &&
+ dist[to] > dist[cur] + edges[id].cost) {
+ dist[to] = dist[cur] + edges[id].cost;
+ pref[to] = cur;
+ con[to] = id;
+ if (!inqueue[to]) {
+ inqueue[to] = true;
+ queue.push(to);
+ }}}}
+ return pref[t] != -1;
+ }
+
+ void extend() {
+ ll w = INF;
+ for (int u = t; pref[u] != u; u = pref[u])
+ w = min(w, edges[con[u]].f);
+ maxflow += w;
+ mincost += dist[t] * w;
+ for (int u = t; pref[u] != u; u = pref[u]) {
+ edges[con[u]].f -= w;
+ edges[con[u] ^ 1].f += w;
+ }}
+
+ void mincostflow() {
+ con.assign(sz(adj), 0);
+ maxflow = mincost = 0;
+ while (SPFA()) extend();
+ }
+};
diff --git a/content/graph/pushRelabel.cpp b/content/graph/pushRelabel.cpp
new file mode 100644
index 0000000..73a9eae
--- /dev/null
+++ b/content/graph/pushRelabel.cpp
@@ -0,0 +1,64 @@
+struct Edge {
+ int to, rev;
+ ll f, c;
+};
+
+vector<vector<Edge>> adj;
+vector<vector<int>> hs;
+vector<ll> ec;
+vector<int> cur, H;
+
+void addEdge(int u, int v, ll c) {
+ adj[u].push_back({v, (int)sz(adj[v]), 0, c});
+ adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0});
+}
+
+void addFlow(Edge& e, ll f) {
+ if (ec[e.to] == 0 && f > 0)
+ hs[H[e.to]].push_back(e.to);
+ e.f += f;
+ adj[e.to][e.rev].f -= f;
+ ec[e.to] += f;
+ ec[adj[e.to][e.rev].to] -= f;
+}
+
+ll maxFlow(int s, int t) {
+ int n = sz(adj);
+ hs.assign(2*n, {});
+ ec.assign(n, 0);
+ cur.assign(n, 0);
+ H.assign(n, 0);
+ H[s] = n;
+ ec[t] = 1;//never set t to active...
+ vector<int> co(2*n);
+ co[0] = n - 1;
+ for (Edge& e : adj[s]) addFlow(e, e.c);
+ for (int hi = 0;;) {
+ while (hs[hi].empty()) if (!hi--) return -ec[s];
+ int v = hs[hi].back();
+ hs[hi].pop_back();
+ while (ec[v] > 0) {
+ if (cur[v] == sz(adj[v])) {
+ H[v] = 2*n;
+ for (int i = 0; i < sz(adj[v]); i++) {
+ Edge& e = adj[v][i];
+ if (e.c - e.f > 0 &&
+ H[v] > H[e.to] + 1) {
+ H[v] = H[e.to] + 1;
+ cur[v] = i;
+ }}
+ co[H[v]]++;
+ if (!--co[hi] && hi < n) {
+ for (int i = 0; i < n; i++) {
+ if (hi < H[i] && H[i] < n) {
+ co[H[i]]--;
+ H[i] = n + 1;
+ }}}
+ hi = H[v];
+ } else {
+ Edge& e = adj[v][cur[v]];
+ if (e.c - e.f > 0 && H[v] == H[e.to] + 1) {
+ addFlow(adj[v][cur[v]], min(ec[v], e.c - e.f));
+ } else {
+ cur[v]++;
+}}}}}
diff --git a/content/graph/reroot.cpp b/content/graph/reroot.cpp
new file mode 100644
index 0000000..4c6a748
--- /dev/null
+++ b/content/graph/reroot.cpp
@@ -0,0 +1,62 @@
+// The usual tree DP can be broken down into 4 steps:
+// - Initialize dp[v] = identity
+// - Iterate over all children w and take a value for w
+// by looking at dp[w] and possibly the edge label of v -> w
+// - combine the values of those children
+// usually this operation should be commutative and associative
+// - finalize the dp[v] after iterating over all children
+struct Reroot {
+ using T = ll;
+
+ // identity element
+ T E() {}
+ // x: dp value of child
+ // e: index of edge going to child
+ T takeChild(T x, int e) {}
+ T comb(T x, T y) {}
+ // called after combining all dp values of children
+ T fin(T x, int v) {}
+
+ vector<vector<pair<int, int>>> g;
+ vector<int> ord, pae;
+ vector<T> dp;
+
+ T dfs(int v) {
+ ord.push_back(v);
+ for (auto [w, e] : g[v]) {
+ g[w].erase(find(all(g[w]), pair(v, e^1)));
+ pae[w] = e^1;
+ dp[v] = comb(dp[v], takeChild(dfs(w), e));
+ }
+ return dp[v] = fin(dp[v], v);
+ }
+
+ vector<T> solve(int n, vector<pair<int, int>> edges) {
+ g.resize(n);
+ for (int i = 0; i < n-1; i++) {
+ g[edges[i].first].emplace_back(edges[i].second, 2*i);
+ g[edges[i].second].emplace_back(edges[i].first, 2*i+1);
+ }
+ pae.assign(n, -1);
+ dp.assign(n, E());
+ dfs(0);
+ vector<T> updp(n, E()), res(n, E());
+ for (int v : ord) {
+ vector<T> pref(sz(g[v])+1), suff(sz(g[v])+1);
+ if (v != 0) pref[0] = takeChild(updp[v], pae[v]);
+ for (int i = 0; i < sz(g[v]); i++){
+ auto [u, w] = g[v][i];
+ pref[i+1] = suff[i] = takeChild(dp[u], w);
+ pref[i+1] = comb(pref[i], pref[i+1]);
+ }
+ for (int i = sz(g[v])-1; i >= 0; i--) {
+ suff[i] = comb(suff[i], suff[i+1]);
+ }
+ for (int i = 0; i < sz(g[v]); i++) {
+ updp[g[v][i].first] = fin(comb(pref[i], suff[i+1]), v);
+ }
+ res[v] = fin(pref.back(), v);
+ }
+ return res;
+ }
+};
diff --git a/content/graph/scc.cpp b/content/graph/scc.cpp
new file mode 100644
index 0000000..ac9a40b
--- /dev/null
+++ b/content/graph/scc.cpp
@@ -0,0 +1,32 @@
+vector<vector<int>> adj, sccs;
+int counter;
+vector<bool> inStack;
+vector<int> low, idx, s; //idx contains the index of the SCC per vertex.
+
+void visit(int v) {
+ int old = low[v] = counter++;
+ s.push_back(v); inStack[v] = true;
+
+ for (auto u : adj[v]) {
+ if (low[u] < 0) visit(u);
+ if (inStack[u]) low[v] = min(low[v], low[u]);
+ }
+
+ if (old == low[v]) {
+ sccs.push_back({});
+ for (int u = -1; u != v;) {
+ u = s.back(); s.pop_back(); inStack[u] = false;
+ idx[u] = sz(sccs) - 1;
+ sccs.back().push_back(u);
+}}}
+
+void scc() {
+ inStack.assign(sz(adj), false);
+ low.assign(sz(adj), -1);
+ idx.assign(sz(adj), -1);
+ sccs.clear();
+
+ counter = 0;
+ for (int i = 0; i < sz(adj); i++) {
+ if (low[i] < 0) visit(i);
+}}
diff --git a/content/graph/stoerWagner.cpp b/content/graph/stoerWagner.cpp
new file mode 100644
index 0000000..97e667a
--- /dev/null
+++ b/content/graph/stoerWagner.cpp
@@ -0,0 +1,53 @@
+struct Edge {
+ int from, to;
+ ll cap;
+};
+
+vector<vector<Edge>> adj, tmp;
+vector<bool> erased;
+
+void merge(int u, int v) {
+ tmp[u].insert(tmp[u].end(), all(tmp[v]));
+ tmp[v].clear();
+ erased[v] = true;
+ for (auto& vec : tmp) {
+ for (Edge& e : vec) {
+ if (e.from == v) e.from = u;
+ if (e.to == v) e.to = u;
+}}}
+
+ll stoer_wagner() {
+ ll res = INF;
+ tmp = adj;
+ erased.assign(sz(tmp), false);
+ for (int i = 1; i < sz(tmp); i++) {
+ int s = 0;
+ while (erased[s]) s++;
+ priority_queue<pair<ll, int>> pq;
+ pq.push({0, s});
+ vector<ll> con(sz(tmp));
+ ll cur = 0;
+ vector<pair<ll, int>> state;
+ while (!pq.empty()) {
+ int c = pq.top().second;
+ pq.pop();
+ if (con[c] < 0) continue; //already seen
+ con[c] = -1;
+ for (auto e : tmp[c]) {
+ if (con[e.to] >= 0) {//add edge to cut
+ con[e.to] += e.cap;
+ pq.push({con[e.to], e.to});
+ cur += e.cap;
+ } else if (e.to != c) {//remove edge from cut
+ cur -= e.cap;
+ }}
+ state.push_back({cur, c});
+ }
+ int t = state.back().second;
+ state.pop_back();
+ if (state.empty()) return 0; //graph is not connected?!
+ merge(state.back().second, t);
+ res = min(res, state.back().first);
+ }
+ return res;
+}
diff --git a/content/graph/treeIsomorphism.cpp b/content/graph/treeIsomorphism.cpp
new file mode 100644
index 0000000..355fefb
--- /dev/null
+++ b/content/graph/treeIsomorphism.cpp
@@ -0,0 +1,15 @@
+vector<vector<int>> adj;
+map<vector<int>, int> known; // don't reset!
+
+int treeLabel(int v, int from = -1) {
+ vector<int> children;
+ for (int u : adj[v]) {
+ if (u == from) continue;
+ children.push_back(treeLabel(u, v));
+ }
+ sort(all(children));
+ if (known.find(children) == known.end()) {
+ known[children] = sz(known);
+ }
+ return known[children];
+}
diff --git a/content/graph/virtualTree.cpp b/content/graph/virtualTree.cpp
new file mode 100644
index 0000000..27d2d6c
--- /dev/null
+++ b/content/graph/virtualTree.cpp
@@ -0,0 +1,22 @@
+// needs dfs in- and out- time and lca function
+vector<int> in, out;
+
+void virtualTree(vector<int> ind) { // indices of used nodes
+ sort(all(ind), [&](int x, int y) {return in[x] < in[y];});
+ for (int i = 0, n = sz(ind); i < n - 1; i++) {
+ ind.push_back(lca(ind[i], ind[i + 1]));
+ }
+ sort(all(ind), [&](int x, int y) {return in[x] < in[y];});
+ ind.erase(unique(all(ind)), ind.end());
+
+ int n = ind.size();
+ vector<vector<int>> tree(n);
+ vector<int> st = {0};
+ for (int i = 1; i < n; i++) {
+ while (in[ind[i]] >= out[ind[st.back()]]) st.pop_back();
+ tree[st.back()].push_back(i);
+ st.push_back(i);
+ }
+ // virtual directed tree with n nodes, original indices in ind
+ // weights can be calculated, e.g. with binary lifting
+}
diff --git a/content/latexHeaders/code.sty b/content/latexHeaders/code.sty
new file mode 100644
index 0000000..3ebdda3
--- /dev/null
+++ b/content/latexHeaders/code.sty
@@ -0,0 +1,141 @@
+% Colors, used for syntax highlighting.
+% To print this document, set all colors to black!
+\usepackage{xcolor}
+\definecolor{safeRed}{HTML}{D7191C}
+\definecolor{safeOrange}{HTML}{FFDE71}
+\definecolor{safeYellow}{HTML}{FFFFBF}
+\definecolor{safeGreen}{HTML}{99CF8F}
+\definecolor{safeBlue}{HTML}{2B83BA}
+
+%try printer friendly colors?
+%\colorlet{keyword}{safeBlue}
+%\colorlet{string}{safeRed}
+%\colorlet{comment}{safeGreen}
+%\colorlet{identifier}{black}
+\definecolor{type}{HTML}{2750A0}
+\definecolor{string}{HTML}{7B3294}
+\definecolor{comment}{HTML}{1A9641}
+\definecolor{identifier}{HTML}{000000}
+\definecolor{keyword}{HTML}{900000}
+
+% Source code listings.
+\usepackage[scaled=0.80]{beramono}
+
+\usepackage{listings}
+\lstset{
+ language={[11]C++},
+ numbers=left,
+ stepnumber=1,
+ numbersep=6pt,
+ numberstyle=\small,
+ breaklines=true,
+ breakautoindent=true,
+ breakatwhitespace=false,
+ numberblanklines=true,
+ postbreak=\space,
+ tabsize=2,
+ upquote=true,
+ basicstyle=\ttfamily\normalsize,
+ showspaces=false,
+ showstringspaces=false,
+ extendedchars=true,
+ keywordstyle=\color{keyword}\bfseries,
+ stringstyle=\color{string}\bfseries,
+ commentstyle=\color{comment}\bfseries\itshape,
+ identifierstyle=\color{identifier},
+ directivestyle=\color{keyword}\bfseries,
+ emph={auto, int, long, long long, float, double, long double, char, bool, void, ll, ld, pt, lll, __int128, __float128, true, false, this, nullptr, INF, inf, EPS, eps},
+ emphstyle=\color{type}\bfseries,
+ frame=trbl,
+ aboveskip=3pt,
+ belowskip=3pt,
+ deletestring=[b]{'},%fix digit separator but break char highlighting (fixed again with literate)
+ escapechar=@
+ %moredelim=**[is][{\btHL[fill=green!30,draw=red,dashed,thin]}]{@}{@}
+}
+
+\newcommand{\formatChar}[1]{{\color{string}\bfseries\textquotesingle{}#1\textquotesingle{}}}
+
+% Listings doesn't support UTF8. This is just enough for German umlauts and commonly used chars.
+\lstset{literate=%
+ {'a'}{{\formatChar{a}}}3
+ {'z'}{{\formatChar{z}}}3
+ {'A'}{{\formatChar{A}}}3
+ {'Z'}{{\formatChar{Z}}}3
+ {'0'}{{\formatChar{0}}}3
+ {'1'}{{\formatChar{1}}}3
+ {'\$'}{{\formatChar{\$}}}3
+ {'\#'}{{\formatChar{\#}}}3
+ {Ö}{{\"O}}1
+ {Ä}{{\"A}}1
+ {Ü}{{\"U}}1
+ {ß}{{\ss}}1
+ {ü}{{\"u}}1
+ {ä}{{\"a}}1
+ {ö}{{\"o}}1
+ {~}{{\textasciitilde}}1
+}
+
+\makeatletter
+\let\orig@lstnumber=\thelstnumber
+\newcommand\lstresetnumber{\global\let\thelstnumber=\orig@lstnumber}
+\let\orig@placelstnumber=\lst@PlaceNumber
+\gdef\lst@PlaceNumber{\orig@placelstnumber\lstresetnumber}
+\newcommand\lstsettmpnumber[1]{\gdef\thelstnumber{#1}}
+
+\lst@AddToHook{OnEmptyLine}{%
+ \ifnum\value{lstnumber}>99
+ \lstsettmpnumber{\_\_\_}
+ \else\ifnum\value{lstnumber}>9
+ \lstsettmpnumber{\_\_}
+ \else
+ \lstsettmpnumber{\_}
+ \fi\fi
+% \lstsettmpnumber{\_\_\kern-6pt}%
+ \vspace{-1.75ex}%
+ \addtocounter{lstnumber}{-1}%
+}
+% old: (change numberblanklines=false!)
+%\lst@AddToHook{OnEmptyLine}{%
+% \vspace{\dimexpr\baselineskip+0.5em}%
+% \addtocounter{lstnumber}{-1}%
+%}
+
+\newenvironment{btHighlight}[1][]
+{\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}}
+{\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup}
+
+\newcommand\btHL[1][]{%
+ \begin{btHighlight}[#1]\bgroup\aftergroup\bt@HL@endenv%
+ }
+ \def\bt@HL@endenv{%
+ \end{btHighlight}%
+ \egroup%
+}
+\newcommand{\bt@HL@box}[2][]{%
+ \tikz[#1]{%
+ \pgfpathrectangle{\pgfpoint{1pt}{0pt}}{\pgfpoint{\wd #2}{\ht #2}}%
+ \pgfusepath{use as bounding box}%
+ \node[anchor=base west, fill=orange!30,outer sep=0pt,inner xsep=2.2pt, inner ysep=0pt, rounded corners=3pt, minimum height=\ht\strutbox+1pt,#1]{\raisebox{1pt}{\strut}\strut\usebox{#2}};
+ }%
+}
+
+\newcommand{\hl}[1]{\btHL[fill=safeOrange,draw=black,thin]{#1}}
+
+\ifthenelse{\isundefined{\gitorigin}}{}{
+ \usepackage{ocgx2}
+ \usepackage{fontawesome}
+ \lst@AddToHook{Init}{%
+ \ifthenelse{\equal{\lst@name}{}}{}{%
+ \begin{minipage}[t][0pt]{\linewidth}%
+ \vspace{0pt}%
+ \hfill%
+ \begin{ocg}[printocg=never]{Source links}{srclinks}{1}%
+ \hfill\href{\gitorigin\lst@name}{\faExternalLink}%
+ \end{ocg}%
+ \end{minipage}%
+ }%
+ }
+}
+\makeatother
+
diff --git a/content/latexHeaders/commands.sty b/content/latexHeaders/commands.sty
new file mode 100644
index 0000000..edbba1b
--- /dev/null
+++ b/content/latexHeaders/commands.sty
@@ -0,0 +1,56 @@
+% custom commands
+\newcommand{\optional}[1]{
+ \ifoptional
+ #1
+ \fi}
+\newcommand{\runtime}[1]{\ensuremath{\mathcal{O}\left(#1\right)}}
+\newcommand{\code}[1]{\lstinline[breaklines=true]{#1}}
+\let\codeSafe\lstinline
+
+\usepackage{tikz}
+\usetikzlibrary{angles,quotes}
+
+
+%new environment to define algorithms
+\usepackage{ifthen}
+\NewDocumentEnvironment{algorithm}{ O{required} m +b }{}{
+ \ifthenelse{\equal{#1}{optional}}{%
+ \optional{
+ \needspace{4\baselineskip}%
+ \subsection{#2\textcolor{gray}{(optional)}}%
+ #3%
+ }
+ }{%
+ \needspace{4\baselineskip}%
+ \subsection{#2}%
+ #3%
+ }
+}
+
+%\ifthenelse{\equal{#3}{}}{}{\runtime{#3}}
+
+\newcommand{\sourcecode}[1]{%
+ \label{code:#1}%
+ \nobreak%
+% \needspace{3\baselineskip}%
+% \nopagebreak%
+ \lstinputlisting{#1}%
+ \penalty -1000%
+}
+\newcommand{\sourceref}[1]{{%
+ \color{comment}\bfseries\itshape{}Seite \pageref{code:#1}%
+}}
+
+\newcommand{\method}[4][]{\texttt{#2}~~#3~~\runtime{#4}#1\par}
+
+\newenvironment{methods}[1][lll]{%
+ %\begin{minipage}{\linewidth}%
+ \renewcommand{\method}[4][]{\texttt{##2}&##3&\ifthenelse{\equal{##4}{}}{}{\runtime{##4}}##1\\}%
+ \begin{tabular}{@{}#1@{}}%
+}{%
+ \end{tabular}%
+ %\end{minipage}%
+ \nobreak%
+ \needspace{3\baselineskip}%
+ \nobreak%
+}
diff --git a/content/latexHeaders/layout.sty b/content/latexHeaders/layout.sty
new file mode 100644
index 0000000..096cf23
--- /dev/null
+++ b/content/latexHeaders/layout.sty
@@ -0,0 +1,82 @@
+% Don't waste space at the page borders. Use two column layout.
+\usepackage[
+ top=2cm,
+ bottom=1cm,
+ left=1cm,
+ right=1cm,
+ landscape
+]{geometry}
+
+% Headline and bottomline.
+\usepackage{scrlayer-scrpage}
+\pagestyle{scrheadings}
+\clearscrheadfoot
+\ihead{\university}
+\chead{\teamname}
+\ohead{\pagemark}
+
+% Shift the title up to waste less space.
+\usepackage{titling}
+\setlength{\droptitle}{-8em}
+
+% Multicol layout for the table of contents.
+\usepackage{multicol}
+\usepackage{multirow}
+\usepackage{array}
+
+% Automatically have table fill horizontal space.
+\usepackage{makecell}
+\usepackage{tabularx}
+\newcolumntype{C}{>{\centering\arraybackslash}X}
+\newcolumntype{L}{>{\raggedright\arraybackslash}X}
+\newcolumntype{R}{>{\raggedleft\arraybackslash}X}
+\newcolumntype{I}{!{\color{lightgray}\vrule}}
+\usepackage{colortbl}
+\newcommand{\grayhline}{\arrayrulecolor{lightgray}\hline
+ \arrayrulecolor{black}}
+
+% Nice table line.
+\usepackage{booktabs}
+
+% Dingbats symbols.
+\usepackage{pifont}
+
+% use less space...
+%\usepackage[subtle, sections, indent, leading, charwidths]{savetrees}
+\usepackage[moderate,sections]{savetrees}
+\RedeclareSectionCommands[
+ beforeskip=1pt plus 5pt,
+ afterskip=0.1pt plus 1.5pt
+]{section,subsection,subsubsection}
+\RedeclareSectionCommands[
+ beforeskip=1pt plus 5pt,
+ afterskip=-1.2ex
+]{paragraph}
+
+% don't indent paragraphs
+\setlength{\parindent}{0em}
+\parskip=0pt
+
+% don't encourage breaks before lists
+\@beginparpenalty=10000
+
+% Nice enumerations without wasting space above and below.
+\usepackage{relsize}
+\usepackage{enumitem}
+\setlist{nosep,leftmargin=2ex,labelwidth=1ex,labelsep=1ex}
+\setlist[2]{leftmargin=3ex,label=\smaller[2]\ding{228}}
+\setlist[3]{leftmargin=3ex,label=\larger\textbf{--}}
+\setlist[description]{leftmargin=0pt}
+
+% decrease space for tables
+\tabcolsep=2pt
+\setlength\extrarowheight{0.3pt plus 1pt}
+
+\newenvironment{expandtable}{%
+ \begin{addmargin}{-3.4pt}
+}{%
+ \end{addmargin}
+}
+
+\usepackage{needspace}
+\usepackage{setspace}
diff --git a/content/latexHeaders/math.sty b/content/latexHeaders/math.sty
new file mode 100644
index 0000000..c34cc99
--- /dev/null
+++ b/content/latexHeaders/math.sty
@@ -0,0 +1,98 @@
+% For Headlines with math
+\usepackage{bm}
+
+% Display math.
+\usepackage{amsmath}
+\usepackage{mathtools}
+\usepackage{amssymb}
+\usepackage{ntheorem}
+
+%\usepackage{pxfonts}
+\usepackage[scaled=0.945,largesc,looser]{newpxtext}%better than pxfonts...
+\usepackage[scaled=0.945,bigdelims]{newpxmath}
+\let\mathbb\vmathbb
+
+\DeclareFontFamily{LMX}{npxexx}{}
+\DeclareFontShape{LMX}{npxexx}{m}{n}{<-> s * [1.045] zplexx}{}
+\DeclareFontShape{LMX}{npxexx}{b}{n}{<-> s * [1.045] zplbexx}{}
+%\DeclareFontShape{LMX}{npxexx}{m}{n}{<-> s * [0.78] zplexx}{}
+%\DeclareFontShape{LMX}{npxexx}{b}{n}{<-> s * [0.78] zplbexx}{}
+\DeclareFontShape{LMX}{npxexx}{bx}{n}{<->ssub * npxexx/b/n}{}
+
+%\usepackage[scaled=0.91]{XCharter}
+%\usepackage[scaled=0.89,type1]{cabin}% sans serif
+%\usepackage[charter,varbb,scaled=1.00,noxchvw]{newtxmath}
+
+%\usepackage{libertine}
+%\usepackage[libertine]{newtxmath}
+
+% New environment for remarks.
+\theoremstyle{break}
+\newtheorem{bem}{Bemerkung}
+
+% New commands for math operators.
+% Binomial coefficients.
+\renewcommand{\binom}[2]{
+ \Bigl(
+ \begin{matrix}
+ #1 \\
+ #2
+ \end{matrix}
+ \Bigr)
+}
+% Euler numbers, first kind.
+\newcommand{\eulerI}[2]{
+ \Bigl\langle
+ \begin{matrix}
+ #1 \\
+ #2
+ \end{matrix}
+ \Bigr\rangle
+}
+% Euler numbers, second kind.
+\newcommand{\eulerII}[2]{
+ \Bigl\langle\mkern-4mu\Bigl\langle
+ \begin{matrix}
+ #1 \\
+ #2
+ \end{matrix}
+ \Bigr\rangle\mkern-4mu\Bigr\rangle
+}
+% Stirling numbers, first kind.
+\newcommand{\stirlingI}[2]{
+ \Bigl[
+ \begin{matrix}
+ #1 \\
+ #2
+ \end{matrix}
+ \Bigr]
+}
+% Stirling numbers, second kind.
+\newcommand{\stirlingII}[2]{
+ \Bigl\{
+ \begin{matrix}
+ #1 \\
+ #2
+ \end{matrix}
+ \Bigr\}
+}
+% Legendre symbol.
+\newcommand{\legendre}[2]{
+ \Bigl(
+ \dfrac{#1}{#2}
+ \Bigr)
+}
+% Expectation values.
+\newcommand{\E}{\text{E}}
+% Greatest common divisor.
+\newcommand{\ggT}{\text{ggT}}
+% sign for negative values
+\newcommand{\sign}{\scalebox{0.66}[1.0]{\( - \)}}
+% absolute values
+\newcommand{\abs}[1]{\left|#1\right|}
+% ceiling function
+\newcommand{\ceil}[1]{\left\lceil#1\right\rceil}
+% floor function
+\newcommand{\floor}[1]{\left\lfloor#1\right\rfloor}
+% multiplication
+\renewcommand{\*}{\ensuremath{\cdotp}}
diff --git a/content/math/berlekampMassey.cpp b/content/math/berlekampMassey.cpp
new file mode 100644
index 0000000..29e084f
--- /dev/null
+++ b/content/math/berlekampMassey.cpp
@@ -0,0 +1,31 @@
+constexpr ll mod = 1'000'000'007;
+vector<ll> BerlekampMassey(const vector<ll>& s) {
+ int n = sz(s), L = 0, m = 0;
+ vector<ll> C(n), B(n), T;
+ C[0] = B[0] = 1;
+
+ ll b = 1;
+ for (int i = 0; i < n; i++) {
+ m++;
+ ll d = s[i] % mod;
+ for (int j = 1; j <= L; j++) {
+ d = (d + C[j] * s[i - j]) % mod;
+ }
+ if (!d) continue;
+ T = C;
+ ll coef = d * powMod(b, mod-2, mod) % mod;
+ for (int j = m; j < n; j++) {
+ C[j] = (C[j] - coef * B[j - m]) % mod;
+ }
+ if (2 * L > i) continue;
+ L = i + 1 - L;
+ swap(B, T);
+ b = d;
+ m = 0;
+ }
+
+ C.resize(L + 1);
+ C.erase(C.begin());
+ for (auto& x : C) x = (mod - x) % mod;
+ return C;
+}
diff --git a/content/math/bigint.cpp b/content/math/bigint.cpp
new file mode 100644
index 0000000..1b3b953
--- /dev/null
+++ b/content/math/bigint.cpp
@@ -0,0 +1,271 @@
+// base and base_digits must be consistent
+constexpr ll base = 1'000'000;
+constexpr ll base_digits = 6;
+struct bigint {
+ using vll = vector<ll>;
+ vll a; ll sign;
+
+ bigint() : sign(1) {}
+
+ bigint(ll v) {*this = v;}
+
+ bigint(const string &s) {read(s);}
+
+ void operator=(ll v) {
+ sign = 1;
+ if (v < 0) sign = -1, v = -v;
+ a.clear();
+ for (; v > 0; v = v / base)
+ a.push_back(v % base);
+ }
+
+ bigint operator+(const bigint& v) const {
+ if (sign == v.sign) {
+ bigint res = v;
+ for (ll i = 0, carry = 0; i < max(sz(a), sz(v.a)) || carry; ++i) {
+ if (i == sz(res.a))
+ res.a.push_back(0);
+ res.a[i] += carry + (i < sz(a) ? a[i] : 0);
+ carry = res.a[i] >= base;
+ if (carry)
+ res.a[i] -= base;
+ }
+ return res;
+ }
+ return *this - (-v);
+ }
+
+ bigint operator-(const bigint& v) const {
+ if (sign == v.sign) {
+ if (abs() >= v.abs()) {
+ bigint res = *this;
+ for (ll i = 0, carry = 0; i < sz(v.a) || carry; ++i) {
+ res.a[i] -= carry + (i < sz(v.a) ? v.a[i] : 0);
+ carry = res.a[i] < 0;
+ if (carry) res.a[i] += base;
+ }
+ res.trim();
+ return res;
+ }
+ return -(v - *this);
+ }
+ return *this + (-v);
+ }
+
+ void operator*=(ll v) {
+ if (v < 0) sign = -sign, v = -v;
+ for (ll i = 0, carry = 0; i < sz(a) || carry; ++i) {
+ if (i == sz(a)) a.push_back(0);
+ ll cur = a[i] * v + carry;
+ carry = cur / base;
+ a[i] = cur % base;
+ }
+ trim();
+ }
+
+ bigint operator*(ll v) const {
+ bigint res = *this;
+ res *= v;
+ return res;
+ }
+
+ friend pair<bigint, bigint> divmod(const bigint& a1, const bigint& b1) {
+ ll norm = base / (b1.a.back() + 1);
+ bigint a = a1.abs() * norm;
+ bigint b = b1.abs() * norm;
+ bigint q, r;
+ q.a.resize(sz(a.a));
+ for (ll i = sz(a.a) - 1; i >= 0; i--) {
+ r *= base;
+ r += a.a[i];
+ ll s1 = sz(r.a) <= sz(b.a) ? 0 : r.a[sz(b.a)];
+ ll s2 = sz(r.a) <= sz(b.a) - 1 ? 0 : r.a[sz(b.a) - 1];
+ ll d = (base * s1 + s2) / b.a.back();
+ r -= b * d;
+ while (r < 0) r += b, --d;
+ q.a[i] = d;
+ }
+ q.sign = a1.sign * b1.sign;
+ r.sign = a1.sign;
+ q.trim();
+ r.trim();
+ return make_pair(q, r / norm);
+ }
+
+ bigint operator/(const bigint& v) const {
+ return divmod(*this, v).first;
+ }
+
+ bigint operator%(const bigint& v) const {
+ return divmod(*this, v).second;
+ }
+
+ void operator/=(ll v) {
+ if (v < 0) sign = -sign, v = -v;
+ for (ll i = sz(a) - 1, rem = 0; i >= 0; --i) {
+ ll cur = a[i] + rem * base;
+ a[i] = cur / v;
+ rem = cur % v;
+ }
+ trim();
+ }
+
+ bigint operator/(ll v) const {
+ bigint res = *this;
+ res /= v;
+ return res;
+ }
+
+ ll operator%(ll v) const {
+ if (v < 0) v = -v;
+ ll m = 0;
+ for (ll i = sz(a) - 1; i >= 0; --i)
+ m = (a[i] + m * base) % v;
+ return m * sign;
+ }
+
+ void operator+=(const bigint& v) {
+ *this = *this + v;
+ }
+ void operator-=(const bigint& v) {
+ *this = *this - v;
+ }
+ void operator*=(const bigint& v) {
+ *this = *this * v;
+ }
+ void operator/=(const bigint& v) {
+ *this = *this / v;
+ }
+
+ bool operator<(const bigint& v) const {
+ if (sign != v.sign) return sign < v.sign;
+ if (sz(a) != sz(v.a))
+ return sz(a) * sign < sz(v.a) * v.sign;
+ for (ll i = sz(a) - 1; i >= 0; i--)
+ if (a[i] != v.a[i])
+ return a[i] * sign < v.a[i] * sign;
+ return false;
+ }
+
+ bool operator>(const bigint& v) const {
+ return v < *this;
+ }
+ bool operator<=(const bigint& v) const {
+ return !(v < *this);
+ }
+ bool operator>=(const bigint& v) const {
+ return !(*this < v);
+ }
+ bool operator==(const bigint& v) const {
+ return !(*this < v) && !(v < *this);
+ }
+ bool operator!=(const bigint& v) const {
+ return *this < v || v < *this;
+ }
+
+ void trim() {
+ while (!a.empty() && !a.back()) a.pop_back();
+ if (a.empty()) sign = 1;
+ }
+
+ bool isZero() const {
+ return a.empty() || (sz(a) == 1 && a[0] == 0);
+ }
+
+ bigint operator-() const {
+ bigint res = *this;
+ res.sign = -sign;
+ return res;
+ }
+
+ bigint abs() const {
+ bigint res = *this;
+ res.sign *= res.sign;
+ return res;
+ }
+
+ ll longValue() const {
+ ll res = 0;
+ for (ll i = sz(a) - 1; i >= 0; i--)
+ res = res * base + a[i];
+ return res * sign;
+ }
+
+ void read(const string& s) {
+ sign = 1;
+ a.clear();
+ ll pos = 0;
+ while (pos < sz(s) && (s[pos] == '-' || s[pos] == '+')) {
+ if (s[pos] == '-') sign = -sign;
+ ++pos;
+ }
+ for (ll i = sz(s) - 1; i >= pos; i -= base_digits) {
+ ll x = 0;
+ for (ll j = max(pos, i - base_digits + 1); j <= i; j++)
+ x = x * 10 + s[j] - '0';
+ a.push_back(x);
+ }
+ trim();
+ }
+
+ friend istream& operator>>(istream& stream, bigint& v) {
+ string s;
+ stream >> s;
+ v.read(s);
+ return stream;
+ }
+
+ friend ostream& operator<<(ostream& stream, const bigint& v) {
+ if (v.sign == -1) stream << '-';
+ stream << (v.a.empty() ? 0 : v.a.back());
+ for (ll i = sz(v.a) - 2; i >= 0; --i)
+ stream << setw(base_digits) << setfill('0') << v.a[i];
+ return stream;
+ }
+
+ static vll karatsubaMultiply(const vll& a, const vll& b) {
+ ll n = sz(a);
+ vll res(n + n);
+ if (n <= 32) {
+ for (ll i = 0; i < n; i++)
+ for (ll j = 0; j < n; j++)
+ res[i + j] += a[i] * b[j];
+ return res;
+ }
+ ll k = n >> 1;
+ vll a1(a.begin(), a.begin() + k);
+ vll a2(a.begin() + k, a.end());
+ vll b1(b.begin(), b.begin() + k);
+ vll b2(b.begin() + k, b.end());
+ vll a1b1 = karatsubaMultiply(a1, b1);
+ vll a2b2 = karatsubaMultiply(a2, b2);
+ for (ll i = 0; i < k; i++) a2[i] += a1[i];
+ for (ll i = 0; i < k; i++) b2[i] += b1[i];
+ vll r = karatsubaMultiply(a2, b2);
+ for (ll i = 0; i < sz(a1b1); i++) r[i] -= a1b1[i];
+ for (ll i = 0; i < sz(a2b2); i++) r[i] -= a2b2[i];
+ for (ll i = 0; i < sz(r); i++) res[i + k] += r[i];
+ for (ll i = 0; i < sz(a1b1); i++) res[i] += a1b1[i];
+ for (ll i = 0; i < sz(a2b2); i++) res[i + n] += a2b2[i];
+ return res;
+ }
+
+ bigint operator*(const bigint& v) const {
+ vll ta(a.begin(), a.end());
+ vll va(v.a.begin(), v.a.end());
+ while (sz(ta) < sz(va)) ta.push_back(0);
+ while (sz(va) < sz(ta)) va.push_back(0);
+ while (sz(ta) & (sz(ta) - 1))
+ ta.push_back(0), va.push_back(0);
+ vll ra = karatsubaMultiply(ta, va);
+ bigint res;
+ res.sign = sign * v.sign;
+ for (ll i = 0, carry = 0; i < sz(ra); i++) {
+ ll cur = ra[i] + carry;
+ res.a.push_back(cur % base);
+ carry = cur / base;
+ }
+ res.trim();
+ return res;
+ }
+};
diff --git a/content/math/binomial0.cpp b/content/math/binomial0.cpp
new file mode 100644
index 0000000..5f2ccaa
--- /dev/null
+++ b/content/math/binomial0.cpp
@@ -0,0 +1,14 @@
+constexpr ll lim = 10'000'000;
+ll fac[lim], inv[lim];
+
+void precalc() {
+ fac[0] = inv[0] = 1;
+ for (int i = 1; i < lim; i++) fac[i] = fac[i-1] * i % mod;
+ inv[lim - 1] = multInv(fac[lim - 1], mod);
+ for (int i = lim - 1; i > 0; i--) inv[i-1] = inv[i] * i % mod;
+}
+
+ll calc_binom(ll n, ll k) {
+ if (n < 0 || n < k || k < 0) return 0;
+ return (inv[k] * inv[n-k] % mod) * fac[n] % mod;
+}
diff --git a/content/math/binomial1.cpp b/content/math/binomial1.cpp
new file mode 100644
index 0000000..dab20b3
--- /dev/null
+++ b/content/math/binomial1.cpp
@@ -0,0 +1,8 @@
+ll calc_binom(ll n, ll k) {
+ if (k > n) return 0;
+ ll r = 1;
+ for (ll d = 1; d <= k; d++) {// Reihenfolge => Teilbarkeit
+ r *= n--, r /= d;
+ }
+ return r;
+}
diff --git a/content/math/binomial2.cpp b/content/math/binomial2.cpp
new file mode 100644
index 0000000..4531505
--- /dev/null
+++ b/content/math/binomial2.cpp
@@ -0,0 +1,32 @@
+constexpr ll mod = 1'000'000'009;
+
+ll binomPPow(ll n, ll k, ll p) {
+ ll res = 1;
+ if (p > n) {
+ } else if (p > n - k || (p * p > n && n % p < k % p)) {
+ res *= p;
+ res %= mod;
+ } else if (p * p <= n) {
+ ll c = 0, tmpN = n, tmpK = k;
+ while (tmpN > 0) {
+ if (tmpN % p < tmpK % p + c) {
+ res *= p;
+ res %= mod;
+ c = 1;
+ } else c = 0;
+ tmpN /= p;
+ tmpK /= p;
+ }}
+ return res;
+}
+
+ll calc_binom(ll n, ll k) {
+ if (k > n) return 0;
+ ll res = 1;
+ k = min(k, n - k);
+ for (ll i = 0; primes[i] <= n; i++) {
+ res *= binomPPow(n, k, primes[i]);
+ res %= mod;
+ }
+ return res;
+}
diff --git a/content/math/binomial3.cpp b/content/math/binomial3.cpp
new file mode 100644
index 0000000..7a6ab4e
--- /dev/null
+++ b/content/math/binomial3.cpp
@@ -0,0 +1,10 @@
+ll calc_binom(ll n, ll k, ll p) {
+ assert(n < p); //wichtig: sonst falsch!
+ if (k > n) return 0;
+ ll x = k % 2 != 0 ? p-1 : 1;
+ for (ll c = p-1; c > n; c--) {
+ x *= c - k; x %= p;
+ x *= multInv(c, p); x %= p;
+ }
+ return x;
+}
diff --git a/content/math/chineseRemainder.cpp b/content/math/chineseRemainder.cpp
new file mode 100644
index 0000000..ccbc5dc
--- /dev/null
+++ b/content/math/chineseRemainder.cpp
@@ -0,0 +1,14 @@
+struct CRT {
+ using lll = __int128;
+ lll M = 1, sol = 0; // Solution unique modulo M
+ bool hasSol = true;
+
+ // Adds congruence x = a (mod m)
+ void add(ll a, ll m) {
+ auto [d, s, t] = extendedEuclid(M, m);
+ if((a - sol) % d != 0) hasSol = false;
+ lll z = M/d * s;
+ M *= m/d;
+ sol = (z % M * (a-sol) % M + sol + M) % M;
+ }
+};
diff --git a/content/math/cycleDetection.cpp b/content/math/cycleDetection.cpp
new file mode 100644
index 0000000..5e68c0c
--- /dev/null
+++ b/content/math/cycleDetection.cpp
@@ -0,0 +1,18 @@
+pair<ll, ll> cycleDetection(ll x0, function<ll(ll)> f) {
+ ll a = x0, b = f(x0), length = 1;
+ for (ll power = 1; a != b; b = f(b), length++) {
+ if (power == length) {
+ power *= 2;
+ length = 0;
+ a = b;
+ }}
+ ll start = 0;
+ a = x0; b = x0;
+ for (ll i = 0; i < length; i++) b = f(b);
+ while (a != b) {
+ a = f(a);
+ b = f(b);
+ start++;
+ }
+ return {start, length};
+}
diff --git a/content/math/discreteLogarithm.cpp b/content/math/discreteLogarithm.cpp
new file mode 100644
index 0000000..68866e0
--- /dev/null
+++ b/content/math/discreteLogarithm.cpp
@@ -0,0 +1,17 @@
+ll dlog(ll a, ll b, ll m) { //a > 0!
+ ll bound = sqrtl(m) + 1; //memory usage bound < p
+ vector<pair<ll, ll>> vals(bound);
+ for (ll i = 0, e = 1; i < bound; i++, e = (e * a) % m) {
+ vals[i] = {e, i};
+ }
+ vals.emplace_back(m, 0);
+ sort(all(vals));
+ ll fact = powMod(a, m - bound - 1, m);
+
+ for (ll i = 0; i < m; i += bound, b = (b * fact) % m) {
+ auto it = lower_bound(all(vals), pair<ll, ll>{b, 0});
+ if (it->first == b) {
+ return (i + it->second) % m;
+ }}
+ return -1;
+}
diff --git a/content/math/discreteNthRoot.cpp b/content/math/discreteNthRoot.cpp
new file mode 100644
index 0000000..403cb3b
--- /dev/null
+++ b/content/math/discreteNthRoot.cpp
@@ -0,0 +1,5 @@
+ll root(ll a, ll b, ll m) { // a > 0!
+ ll g = findPrimitive(m);
+ ll c = dlog(powMod(g, a, m), b, m);
+ return c < 0 ? -1 : powMod(g, c, m);
+}
diff --git a/content/math/divisors.cpp b/content/math/divisors.cpp
new file mode 100644
index 0000000..5afd4fb
--- /dev/null
+++ b/content/math/divisors.cpp
@@ -0,0 +1,11 @@
+ll countDivisors(ll n) {
+ ll res = 1;
+ for (ll i = 2; i * i * i <= n; i++) {
+ ll c = 0;
+ while (n % i == 0) {n /= i; c++;}
+ res *= c + 1;
+ }
+ if (isPrime(n)) res *= 2;
+ else if (n > 1) res *= isSquare(n) ? 3 : 4;
+ return res;
+}
diff --git a/content/math/extendedEuclid.cpp b/content/math/extendedEuclid.cpp
new file mode 100644
index 0000000..ecf4a16
--- /dev/null
+++ b/content/math/extendedEuclid.cpp
@@ -0,0 +1,6 @@
+// a*x + b*y = ggt(a, b)
+array<ll, 3> extendedEuclid(ll a, ll b) {
+ if (a == 0) return {b, 0, 1};
+ auto [d, x, y] = extendedEuclid(b % a, a);
+ return {d, y - (b / a) * x, x};
+}
diff --git a/content/math/gauss.cpp b/content/math/gauss.cpp
new file mode 100644
index 0000000..8129fd2
--- /dev/null
+++ b/content/math/gauss.cpp
@@ -0,0 +1,36 @@
+void normalLine(int line) {
+ double factor = mat[line][line];
+ for (double& x : mat[line]) x /= factor;
+}
+
+void takeAll(int n, int line) {
+ for (int i = 0; i < n; i++) {
+ if (i == line) continue;
+ double diff = mat[i][line];
+ for (int j = 0; j < sz(mat[i]); j++) {
+ mat[i][j] -= diff * mat[line][j];
+}}}
+
+int gauss(int n) {
+ vector<bool> done(n, false);
+ for (int i = 0; i < n; i++) {
+ int swappee = i; // Sucht Pivotzeile für bessere Stabilität.
+ for (int j = 0; j < n; j++) {
+ if (done[j]) continue;
+ if (abs(mat[j][i]) > abs(mat[i][i])) swappee = j;
+ }
+ swap(mat[i], mat[swappee]);
+ if (abs(mat[i][i]) > EPS) {
+ normalLine(i);
+ takeAll(n, i);
+ done[i] = true;
+ }}
+ // Ab jetzt nur checks bzgl. Eindeutigkeit/Existenz der Lösung.
+ for (int i = 0; i < n; i++) {
+ bool allZero = true;
+ for (int j = i; j < n; j++) allZero &= abs(mat[i][j]) <= EPS;
+ if (allZero && abs(mat[i][n]) > EPS) return INCONSISTENT;
+ if (allZero && abs(mat[i][n]) <= EPS) return MULTIPLE;
+ }
+ return UNIQUE;
+}
diff --git a/content/math/gcd-lcm.cpp b/content/math/gcd-lcm.cpp
new file mode 100644
index 0000000..a1c63c8
--- /dev/null
+++ b/content/math/gcd-lcm.cpp
@@ -0,0 +1,2 @@
+ll gcd(ll a, ll b) {return b == 0 ? a : gcd(b, a % b);}
+ll lcm(ll a, ll b) {return a * (b / gcd(a, b));}
diff --git a/content/math/goldenSectionSearch.cpp b/content/math/goldenSectionSearch.cpp
new file mode 100644
index 0000000..28ee4c3
--- /dev/null
+++ b/content/math/goldenSectionSearch.cpp
@@ -0,0 +1,15 @@
+template<typename F>
+ld gss(ld l, ld r, F&& f) {
+ ld inv = (sqrt(5.0l) - 1) / 2;
+ ld x1 = r - inv*(r-l), x2 = l + inv*(r-l);
+ ld f1 = f(x1), f2 = f(x2);
+ for (int i = 0; i < 200; i++) {
+ if (f1 < f2) { //change to > to find maximum
+ r = x2; x2 = x1; f2 = f1;
+ x1 = r - inv*(r-l); f1 = f(x1);
+ } else {
+ l = x1; x1 = x2; f1 = f2;
+ x2 = l + inv*(r-l); f2 = f(x2);
+ }}
+ return l;
+}
diff --git a/content/math/inversions.cpp b/content/math/inversions.cpp
new file mode 100644
index 0000000..9e47f9b
--- /dev/null
+++ b/content/math/inversions.cpp
@@ -0,0 +1,9 @@
+ll inversions(const vector<ll>& v) {
+ Tree<pair<ll, ll>> t; //ordered statistics tree @\sourceref{datastructures/pbds.cpp}@
+ ll res = 0;
+ for (ll i = 0; i < sz(v); i++) {
+ res += i - t.order_of_key({v[i], i});
+ t.insert({v[i], i});
+ }
+ return res;
+}
diff --git a/content/math/inversionsMerge.cpp b/content/math/inversionsMerge.cpp
new file mode 100644
index 0000000..8235b11
--- /dev/null
+++ b/content/math/inversionsMerge.cpp
@@ -0,0 +1,27 @@
+// Laufzeit: O(n*log(n))
+ll merge(vector<ll>& v, vector<ll>& left, vector<ll>& right) {
+ int a = 0, b = 0, i = 0;
+ ll inv = 0;
+ while (a < sz(left) && b < sz(right)) {
+ if (left[a] < right[b]) v[i++] = left[a++];
+ else {
+ inv += sz(left) - a;
+ v[i++] = right[b++];
+ }
+ }
+ while (a < sz(left)) v[i++] = left[a++];
+ while (b < sz(right)) v[i++] = right[b++];
+ return inv;
+}
+
+ll mergeSort(vector<ll> &v) { // Sortiert v und gibt Inversionszahl zurück.
+ int n = sz(v);
+ vector<ll> left(n / 2), right((n + 1) / 2);
+ for (int i = 0; i < n / 2; i++) left[i] = v[i];
+ for (int i = n / 2; i < n; i++) right[i - n / 2] = v[i];
+
+ ll result = 0;
+ if (sz(left) > 1) result += mergeSort(left);
+ if (sz(right) > 1) result += mergeSort(right);
+ return result + merge(v, left, right);
+}
diff --git a/content/math/kthperm.cpp b/content/math/kthperm.cpp
new file mode 100644
index 0000000..504f09c
--- /dev/null
+++ b/content/math/kthperm.cpp
@@ -0,0 +1,14 @@
+vector<ll> kthperm(ll n, ll k) {
+ Tree<ll> t;
+ vector<ll> res(n);
+ for (ll i = 1; i <= n; k /= i, i++) {
+ t.insert(i - 1);
+ res[n - i] = k % i;
+ }
+ for (ll& x : res) {
+ auto it = t.find_by_order(x);
+ x = *it;
+ t.erase(it);
+ }
+ return res;
+}
diff --git a/content/math/legendre.cpp b/content/math/legendre.cpp
new file mode 100644
index 0000000..b85ea2a
--- /dev/null
+++ b/content/math/legendre.cpp
@@ -0,0 +1,4 @@
+ll legendre(ll a, ll p) { // p prim >= 2
+ ll s = powMod(a, p / 2, p);
+ return s < 2 ? s : -1ll;
+}
diff --git a/content/math/lgsFp.cpp b/content/math/lgsFp.cpp
new file mode 100644
index 0000000..0241742
--- /dev/null
+++ b/content/math/lgsFp.cpp
@@ -0,0 +1,26 @@
+void normalLine(int line, ll p) {
+ ll factor = multInv(mat[line][line], p);
+ for (ll& x : mat[line]) x = (x * factor) % p;
+}
+
+void takeAll(int n, int line, ll p) {
+ for (int i = 0; i < n; i++) {
+ if (i == line) continue;
+ ll diff = mat[i][line];
+ for (int j = 0; j < sz(mat[i]); j++) {
+ mat[i][j] -= (diff * mat[line][j]) % p;
+ mat[i][j] = (mat[i][j] + p) % p;
+}}}
+
+void gauss(int n, ll mod) {
+ vector<bool> done(n, false);
+ for (int i = 0; i < n; i++) {
+ int j = 0;
+ while (j < n && (done[j] || mat[j][i] == 0)) j++;
+ if (j == n) continue;
+ swap(mat[i], mat[j]);
+ normalLine(i, mod);
+ takeAll(n, i, mod);
+ done[i] = true;
+}}
+// für Eindeutigkeit, Existenz etc. siehe LGS über R @\sourceref{math/gauss.cpp}@
diff --git a/content/math/linearCongruence.cpp b/content/math/linearCongruence.cpp
new file mode 100644
index 0000000..cdb5a37
--- /dev/null
+++ b/content/math/linearCongruence.cpp
@@ -0,0 +1,5 @@
+ll solveLinearCongruence(ll a, ll b, ll m) {
+ ll g = gcd(a, m);
+ if (b % g != 0) return -1;
+ return ((b / g) * multInv(a / g, m / g)) % (m / g);
+}
diff --git a/content/math/linearRecurence.cpp b/content/math/linearRecurence.cpp
new file mode 100644
index 0000000..2501e64
--- /dev/null
+++ b/content/math/linearRecurence.cpp
@@ -0,0 +1,33 @@
+constexpr ll mod = 1'000'000'007;
+vector<ll> modMul(const vector<ll>& a, const vector<ll>& b,
+ const vector<ll>& c) {
+ ll n = sz(c);
+ vector<ll> res(n * 2 + 1);
+ for (int i = 0; i <= n; i++) { //a*b
+ for (int j = 0; j <= n; j++) {
+ res[i + j] += a[i] * b[j];
+ res[i + j] %= mod;
+ }}
+ for (int i = 2 * n; i > n; i--) { //res%c
+ for (int j = 0; j < n; j++) {
+ res[i - 1 - j] += res[i] * c[j];
+ res[i - 1 - j] %= mod;
+ }}
+ res.resize(n + 1);
+ return res;
+}
+
+ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) {
+ assert(sz(f) == sz(c));
+ vector<ll> tmp(sz(c) + 1), a(sz(c) + 1);
+ tmp[0] = a[1] = 1; //tmp = (x^k) % c
+
+ for (k++; k > 0; k /= 2) {
+ if (k & 1) tmp = modMul(tmp, a, c);
+ a = modMul(a, a, c);
+ }
+
+ ll res = 0;
+ for (int i = 0; i < sz(c); i++) res += (tmp[i+1] * f[i]) % mod;
+ return res % mod;
+}
diff --git a/content/math/linearSieve.cpp b/content/math/linearSieve.cpp
new file mode 100644
index 0000000..64440dd
--- /dev/null
+++ b/content/math/linearSieve.cpp
@@ -0,0 +1,50 @@
+constexpr ll N = 10'000'000;
+ll small[N], power[N], sieved[N];
+vector<ll> primes;
+
+//wird aufgerufen mit (p^k, p, k) für primes p und k > 0
+ll mu(ll pk, ll p, ll k) {return -(k == 1);}
+ll phi(ll pk, ll p, ll k) {return pk - pk / p;}
+ll div(ll pk, ll p, ll k) {return k+1;}
+ll divSum(ll pk, ll p, ll k) {return (pk*p-1) / (p - 1);}
+ll square(ll pk, ll p, ll k) {return k % 2 ? pk / p : pk;}
+ll squareFree(ll pk, ll p, ll k) {return p;}
+
+void sieve() { // O(N)
+ small[1] = power[1] = sieved[1] = 1;
+ for (ll i = 2; i < N; i++) {
+ if (small[i] == 0) {
+ primes.push_back(i);
+ for (ll pk = i, k = 1; pk < N; pk *= i, k++) {
+ small[pk] = i;
+ power[pk] = pk;
+ sieved[pk] = mu(pk, i, k); // Aufruf ändern!
+ }}
+ for (ll j=0; i*primes[j] < N && primes[j] < small[i]; j++) {
+ ll k = i * primes[j];
+ small[k] = power[k] = primes[j];
+ sieved[k] = sieved[i] * sieved[primes[j]];
+ }
+ if (i * small[i] < N && power[i] != i) {
+ ll k = i * small[i];
+ small[k] = small[i];
+ power[k] = power[i] * small[i];
+ sieved[k] = sieved[power[k]] * sieved[k / power[k]];
+}}}
+
+ll naive(ll n) { // O(sqrt(n))
+ ll res = 1;
+ for (ll p = 2; p * p <= n; p++) {
+ if (n % p == 0) {
+ ll pk = 1;
+ ll k = 0;
+ do {
+ n /= p;
+ pk *= p;
+ k++;
+ } while (n % p == 0);
+ res *= mu(pk, p, k); // Aufruf ändern!
+ }}
+ if (n > 1) res *= mu(n, n, 1);
+ return res;
+}
diff --git a/content/math/longestIncreasingSubsequence.cpp b/content/math/longestIncreasingSubsequence.cpp
new file mode 100644
index 0000000..fcb63b4
--- /dev/null
+++ b/content/math/longestIncreasingSubsequence.cpp
@@ -0,0 +1,17 @@
+vector<int> lis(vector<ll>& a) {
+ int n = sz(a), len = 0;
+ vector<ll> dp(n, INF), dp_id(n), prev(n);
+ for (int i = 0; i < n; i++) {
+ int pos = lower_bound(all(dp), a[i]) - dp.begin();
+ dp[pos] = a[i];
+ dp_id[pos] = i;
+ prev[i] = pos ? dp_id[pos - 1] : -1;
+ len = max(len, pos + 1);
+ }
+ // reconstruction
+ vector<int> res(len);
+ for (int x = dp_id[len-1]; len--; x = prev[x]) {
+ res[len] = x;
+ }
+ return res; // indices of one LIS
+}
diff --git a/content/math/math.tex b/content/math/math.tex
new file mode 100644
index 0000000..f99d0d4
--- /dev/null
+++ b/content/math/math.tex
@@ -0,0 +1,525 @@
+\section{Mathe}
+
+\begin{algorithm}{Longest Increasing Subsequence}
+ \begin{itemize}
+ \item \code{lower\_bound} $\Rightarrow$ streng monoton
+ \item \code{upper\_bound} $\Rightarrow$ monoton
+ \end{itemize}
+ \sourcecode{math/longestIncreasingSubsequence.cpp}
+\end{algorithm}
+\vfill\null\columnbreak
+
+\begin{algorithm}{Zykel Erkennung}
+ \begin{methods}
+ \method{cycleDetection}{findet Zyklus von $x_0$ und Länge in $f$}{b+l}
+ \end{methods}
+ \sourcecode{math/cycleDetection.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Permutationen}
+ \begin{methods}
+ \method{kthperm}{findet $k$-te Permutation \big($k \in [0, n!$)\big)}{n\*\log(n)}
+ \end{methods}
+ \sourcecode{math/kthperm.cpp}
+ \begin{methods}
+ \method{permIndex}{bestimmt Index der Permutation \big($\mathit{res} \in [0, n!$)\big)}{n\*\log(n)}
+ \end{methods}
+ \sourcecode{math/permIndex.cpp}
+\end{algorithm}
+\clearpage
+
+\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$}
+%\vspace{-1.25em}
+%\begin{multicols}{2}
+\method{mulMod}{berechnet $a \cdot b \bmod n$}{\log(b)}
+\sourcecode{math/modMulIterativ.cpp}
+% \vfill\null\columnbreak
+\method{powMod}{berechnet $a^b \bmod n$}{\log(b)}
+\sourcecode{math/modPowIterativ.cpp}
+%\end{multicols}
+%\vspace{-2.75em}
+\begin{itemize}
+ \item für $a > 10^9$ \code{__int128} oder \code{mulMod} benutzen!
+\end{itemize}
+
+\begin{algorithm}{ggT, kgV, erweiterter euklidischer Algorithmus}
+ \runtime{\log(a) + \log(b)}
+ \sourcecode{math/extendedEuclid.cpp}
+\end{algorithm}
+
+\subsection{Multiplikatives Inverses von $\boldsymbol{x}$ in $\boldsymbol{\mathbb{Z}/m\mathbb{Z}}$}
+\textbf{Falls $\boldsymbol{m}$ prim:}\quad $x^{-1} \equiv x^{m-2} \bmod m$
+
+\textbf{Falls $\boldsymbol{\ggT(x, m) = 1}$:}
+\begin{itemize}
+ \item Erweiterter euklidischer Algorithmus liefert $\alpha$ und $\beta$ mit
+ $\alpha x + \beta m = 1$.
+ \item Nach Kongruenz gilt $\alpha x + \beta m \equiv \alpha x \equiv 1 \bmod m$.
+ \item $x^{-1} :\equiv \alpha \bmod m$
+\end{itemize}
+\textbf{Sonst $\boldsymbol{\ggT(x, m) > 1}$:}\quad Es existiert kein $x^{-1}$.
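+
+\textbf{Beispiel:} $x = 3$, $m = 7$: der erweiterte euklidische Algorithmus liefert $(-2)\cdot 3 + 1\cdot 7 = 1$, also $x^{-1} \equiv -2 \equiv 5 \bmod 7$.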
+% \sourcecode{math/multInv.cpp}
+\sourcecode{math/shortModInv.cpp}
+
+\paragraph{Lemma von \textsc{Bézout}}
+Sei $(x, y)$ eine Lösung der diophantischen Gleichung $ax + by = d$.
+Dann lassen sich wie folgt alle Lösungen berechnen:
+\[
+\left(x + k\frac{b}{\ggT(a, b)},~y - k\frac{a}{\ggT(a, b)}\right)
+\]
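+\textbf{Beispiel:} $6x + 4y = 2$ hat die Lösung $(1, -1)$; mit $\ggT(6, 4) = 2$ sind alle Lösungen $(1 + 2k,~-1 - 3k)$.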
+
+\paragraph{\textsc{Pell}-Gleichungen}
+Sei $(\overline{x}, \overline{y})$ die Lösung von $x^2 - ny^2 = 1$, die $x>1$ minimiert.
+Sei $(\tilde{x}, \tilde{y})$ die Lösung von $x^2-ny^2 = c$, die $x>1$ minimiert. Dann lassen
+sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
+\begin{align*}
+ x_1&\coloneqq \tilde{x}, & y_1&\coloneqq\tilde{y}\\
+ x_{k+1}&\coloneqq \overline{x}x_k+n\overline{y}y_k, & y_{k+1}&\coloneqq\overline{x}y_k+\overline{y}x_k
+\end{align*}
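+\textbf{Beispiel:} für $x^2 - 2y^2 = 1$ ist $(\overline{x}, \overline{y}) = (\tilde{x}, \tilde{y}) = (3, 2)$, also $(x_2, y_2) = (3\cdot 3 + 2\cdot 2\cdot 2,~3\cdot 2 + 2\cdot 3) = (17, 12)$.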
+
+\begin{algorithm}{Lineare Kongruenz}
+ \begin{itemize}
+ \item Kleinste Lösung $x$ für $ax\equiv b\pmod{m}$.
+ \item Weitere Lösungen unterscheiden sich um \raisebox{2pt}{$\frac{m}{g}$}, es gibt
+ also $g$ Lösungen modulo $m$.
+ \end{itemize}
+ \sourcecode{math/linearCongruence.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Chinesischer Restsatz}
+ \begin{itemize}
+ \item Extrem anfällig gegen Overflows. Evtl. häufig 128-Bit Integer verwenden.
+ \item Direkte Formel für zwei Kongruenzen $x \equiv a \bmod n$, $x \equiv b \bmod m$:
+ \[
+ x \equiv a - y \cdot n \cdot \frac{a - b}{d} \bmod \frac{mn}{d}
+ \qquad \text{mit} \qquad
+ d := \ggT(n, m) = yn + zm
+ \]
+ Formel kann auch für nicht teilerfremde Moduli verwendet werden.
+ Sind die Moduli nicht teilerfremd, existiert genau dann eine Lösung,
+ wenn $a\equiv~b \bmod \ggT(m, n)$.
+ In diesem Fall sind keine Faktoren
+ auf der linken Seite erlaubt.
+ \end{itemize}
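+ \textbf{Beispiel:} $x \equiv 3 \bmod 4$, $x \equiv 5 \bmod 6$: $d = \ggT(4, 6) = 2 = (-1)\cdot 4 + 1\cdot 6$, also $x \equiv 3 - (-1)\cdot 4\cdot\frac{3 - 5}{2} \equiv 11 \bmod 12$.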
+ \sourcecode{math/chineseRemainder.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Primzahltest \& Faktorisierung}
+ \method{isPrime}{prüft ob Zahl prim ist}{\log(n)^2}
+ \sourcecode{math/millerRabin.cpp}
+ \method{rho}{findet zufälligen Teiler}{\sqrt[\leftroot{3}\uproot{2}4]{n}}
+ \sourcecode{math/rho.cpp}
+ %\method{squfof}{findet zufälligen Teiler}{\sqrt[\leftroot{4}\uproot{2}4]{n}}
+ %\sourcecode{math/squfof.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Teiler}
+ \begin{methods}
+ \method{countDivisors}{Zählt Teiler von $n$}{\sqrt[\leftroot{3}\uproot{2}3]{n}}
+ \end{methods}
+ \sourcecode{math/divisors.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Matrix-Exponent}
+ \begin{methods}
+ \method{precalc}{berechnet $m^{2^b}$ vor}{\log(b)\*n^3}
+ \method{calc}{berechnet $m^b \cdot v$}{\log(b)\*n^2}
+ \end{methods}
+ \textbf{Tipp:} wenn \code{v[x]=1} und \code{0} sonst, dann ist \code{res[y]} = $m^b_{y,x}$.
+ \sourcecode{math/matrixPower.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Lineare Rekurrenz}
+ \begin{methods}
+ \method{BerlekampMassey}{Berechnet eine lineare Rekurrenz $n$-ter Ordnung}{n^2}
+ \method{}{aus den ersten $2n$ Werten}{}
+ \end{methods}
+ \sourcecode{math/berlekampMassey.cpp}
+ Sei $f(n)=c_{0}f(n-1)+c_{1}f(n-2)+\dots + c_{n-1}f(0)$ eine lineare Rekurrenz.
+
+ \begin{methods}
+ \method{kthTerm}{Berechnet $k$-ten Term einer Rekurrenz $n$-ter Ordnung}{\log(k)\cdot n^2}
+ \end{methods}
+ \sourcecode{math/linearRecurence.cpp}
+ Alternativ kann der \mbox{$k$-te} Term in \runtime{n^3\log(k)} berechnet werden:
+ $$\renewcommand\arraystretch{1.5}
+ \setlength\arraycolsep{3pt}
+ \begin{pmatrix}
+ c_{0} & c_{1} & \smash{\cdots} & \smash{\cdots} & c_{n-1} \\
+ 1 & 0 & \smash{\cdots} & \smash{\cdots} & 0 \\
+ 0 & \smash{\ddots} & \smash{\ddots} & & \smash{\vdots} \\
+ \smash{\vdots} & \smash{\ddots} & \smash{\ddots} & \smash{\ddots} & \smash{\vdots} \\
+ 0 & \smash{\cdots} & 0 & 1 & 0 \\
+ \end{pmatrix}^k
+ \times~~
+ \begin{pmatrix}
+ f(n-1) \\
+ f(n-2) \\
+ \smash{\vdots} \\
+ \smash{\vdots} \\
+ f(0) \\
+ \end{pmatrix}
+ ~~=~~
+ \begin{pmatrix}
+ f(n-1+k) \\
+ f(n-2+k) \\
+ \smash{\vdots} \\
+ \smash{\vdots} \\
+ f(k) \makebox[0pt][l]{\hspace{15pt}$\vcenter{\hbox{\huge$\leftarrow$}}$}\\
+ \end{pmatrix}
+ $$
+\end{algorithm}
+
+\begin{algorithm}{Diskreter Logarithmus}
+ \begin{methods}
+ \method{dlog}{bestimmt Lösung $x$ für $a^x=b \bmod m$}{\sqrt{m}\*\log(m)}
+ \end{methods}
+ \sourcecode{math/discreteLogarithm.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Diskrete Quadratwurzel}
+ \begin{methods}
+ \method{sqrtMod}{bestimmt Lösung $x$ für $x^2=a \bmod p$ }{\log(p)}
+ \end{methods}
+ \textbf{Wichtig:} $p$ muss prim sein!
+ \sourcecode{math/sqrtModCipolla.cpp}
+\end{algorithm}
+%\columnbreak
+
+\begin{algorithm}{Primitivwurzeln}
+ \begin{itemize}
+ \item Primitivwurzel modulo $n$ existiert $\Leftrightarrow$ $n \in \{2,\ 4,\ p^\alpha,\ 2\cdot p^\alpha \mid\ 2 < p \in \mathbb{P},\ \alpha \in \mathbb{N}\}$
+ \item es existiert entweder keine oder $\varphi(\varphi(n))$ inkongruente Primitivwurzeln
+ \item Sei $g$ Primitivwurzel modulo $n$.
+ Dann gilt:\newline
+ Das kleinste $k$, sodass $g^k \equiv 1 \bmod n$, ist $k = \varphi(n)$.
+ \end{itemize}
+ \begin{methods}
+ \method{isPrimitive}{prüft ob $g$ eine Primitivwurzel ist}{\log(\varphi(n))\*\log(n)}
+ \method{findPrimitive}{findet Primitivwurzel (oder -1)}{\abs{ans}\*\log(\varphi(n))\*\log(n)}
+ \end{methods}
+ \sourcecode{math/primitiveRoot.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Diskrete \textrm{\textit{n}}-te Wurzel}
+ \begin{methods}
+ \method{root}{bestimmt Lösung $x$ für $x^a=b \bmod m$ }{\sqrt{m}\*\log(m)}
+ \end{methods}
+ Alle Lösungen haben die Form $g^{c + \frac{i \cdot \phi(n)}{\gcd(a, \phi(n))}}$
+ \sourcecode{math/discreteNthRoot.cpp}
+\end{algorithm}
+
+\begin{algorithm}{\textsc{Legendre}-Symbol}
+ Sei $p \geq 3$ eine Primzahl, $a \in \mathbb{Z}$:
+ \vspace{-0.15cm}\begin{align*}
+ \hspace*{3cm}\legendre{a}{p} &=
+ \begin{cases*}
+ \hphantom{-}0 & falls $p~\vert~a$ \\[-1ex]
+ \hphantom{-}1 & falls $\exists x \in \mathbb{Z}\backslash p\mathbb{Z} : a \equiv x^2 \bmod p$ \\[-1ex]
+ -1 & sonst
+ \end{cases*} \\
+ \legendre{-1}{p} = (-1)^{\frac{p - 1}{2}} &=
+ \begin{cases*}
+ \hphantom{-}1 & falls $p \equiv 1 \bmod 4$ \\[-1ex]
+ -1 & falls $p \equiv 3 \bmod 4$
+ \end{cases*} \\
+ \legendre{2}{p} = (-1)^{\frac{p^2 - 1}{8}} &=
+ \begin{cases*}
+ \hphantom{-}1 & falls $p \equiv \pm 1 \bmod 8$ \\[-1ex]
+ -1 & falls $p \equiv \pm 3 \bmod 8$
+ \end{cases*}
+ \end{align*}
+ \begin{align*}
+ \legendre{p}{q} \cdot \legendre{q}{p} = (-1)^{\frac{p - 1}{2} \cdot \frac{q - 1}{2}} &&
+ \legendre{a}{p} \equiv a^{\frac{p-1}{2}}\bmod p
+ \end{align*}
+ \vspace{-0.05cm}
+ \sourcecode{math/legendre.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Lineares Sieb und Multiplikative Funktionen}
+ Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ, wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$.
+
+ $\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen.
+
+ \begin{methods}
+ \method{sieve}{berechnet Primzahlen und co.}{N}
+ \method{sieved}{Wert der entsprechenden multiplikativen Funktion}{1}
+
+ \method{naive}{Wert der entsprechenden multiplikativen Funktion}{\sqrt{n}}
+ \end{methods}
+ \textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}!
+
+ \sourcecode{math/linearSieve.cpp}
+ \textbf{\textsc{Möbius}-Funktion:}
+ \begin{itemize}
+ \item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat
+ \item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat
+ \item $\mu(n)=0$, falls $n$ nicht quadratfrei ist
+ \end{itemize}
+
+ \textbf{\textsc{Euler}sche $\boldsymbol{\varphi}$-Funktion:}
+ \begin{itemize}
+ \item Zählt die zu $n$ teilerfremden Zahlen $\leq n$.
+ \item $p$ prim, $k \in \mathbb{N}$:
+ $~\varphi(p^k) = p^k - p^{k - 1}$
+
+ \item \textbf{Euler's Theorem:}
+ Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$.
+ Falls $m$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}:
+ $a^{m} \equiv a \pmod{m}$
+ \end{itemize}
+\end{algorithm}
+
+\begin{algorithm}{Primzahlsieb von \textsc{Eratosthenes}}
+ \begin{itemize}
+ \item Bis $10^8$ in unter 64MB Speicher (lange Berechnung)
+ \end{itemize}
+ \begin{methods}
+ \method{primeSieve}{berechnet Primzahlen und Anzahl}{N\*\log(\log(N))}
+ \method{isPrime}{prüft ob Zahl prim ist}{1}
+ \end{methods}
+ \sourcecode{math/primeSieve.cpp}
+\end{algorithm}
+
+\begin{algorithm}{\textsc{Möbius}-Inversion}
+ \begin{itemize}
+ \item Seien $f,g : \mathbb{N} \to \mathbb{N}$ und $g(n) := \sum_{d \vert n}f(d)$.
+ Dann ist $f(n) = \sum_{d \vert n}g(d)\mu(\frac{n}{d})$.
+ \item $\sum\limits_{d \vert n}\mu(d) =
+ \begin{cases*}
+ 1 & falls $n = 1$\\
+ 0 & sonst
+ \end{cases*}$
+ \end{itemize}
+ \textbf{Beispiel Inklusion/Exklusion:}
+ Gegeben sei eine Sequenz $A=\{a_1,\ldots,a_n\}$ von Zahlen mit $1 \leq a_i \leq N$. Zähle die Anzahl der \emph{coprime subsequences}.\newline
+ \textbf{Lösung}:
+ Für jedes $x$, sei $cnt[x]$ die Anzahl der Vielfachen von $x$ in $A$.
+ Es gibt $2^{cnt[x]}-1$ nicht leere Subsequences in $A$, die nur Vielfache von $x$ enthalten.
+ Die Anzahl der Subsequences mit $\ggT=1$ ist gegeben durch $\sum_{i = 1}^N \mu(i) \cdot (2^{cnt[i]} - 1)$.
+\end{algorithm}
+
+\subsection{LGS über $\boldsymbol{\mathbb{F}_p}$}
+\method{gauss}{löst LGS}{n^3}
+\sourcecode{math/lgsFp.cpp}
+
+\subsection{LGS über $\boldsymbol{\mathbb{R}}$}
+\method{gauss}{löst LGS}{n^3}
+\sourcecode{math/gauss.cpp}
+
+\vfill\null\columnbreak
+
+\begin{algorithm}{Numerisch Extremstelle bestimmen}
+ \sourcecode{math/goldenSectionSearch.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Numerisch Integrieren, Simpsonregel}
+ \sourcecode{math/simpson.cpp}
+\end{algorithm}
+
+
+\begin{algorithm}{Polynome, FFT, NTT \& andere Transformationen}
+ Multipliziert Polynome $A$ und $B$.
+ \begin{itemize}
+ \item $\deg(A \cdot B) = \deg(A) + \deg(B)$
+ \item Vektoren \code{a} und \code{b} müssen mindestens Größe
+ $\deg(A \cdot B) + 1$ haben.
+ Größe muss eine Zweierpotenz sein.
+ \item Für ganzzahlige Koeffizienten: \code{(ll)round(real(a[i]))}
+ \item \emph{xor}, \emph{or} und \emph{and} Transform funktioniert auch mit \code{double} oder modulo einer Primzahl $p$ falls $p \geq 2^{\texttt{bits}}$
+ \end{itemize}
+ %\sourcecode{math/fft.cpp}
+ %\sourcecode{math/ntt.cpp}
+ \sourcecode{math/transforms/fft.cpp}
+ \sourcecode{math/transforms/ntt.cpp}
+ \sourcecode{math/transforms/bitwiseTransforms.cpp}
+ Multiplikation mit 2 Transforms statt 3 (nur benutzen, wenn nötig!):
+ \sourcecode{math/transforms/fftMul.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Operations on Formal Power Series}
+ \sourcecode{math/transforms/seriesOperations.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Inversionszahl}
+ \sourcecode{math/inversions.cpp}
+\end{algorithm}
+
+\subsection{Satz von \textsc{Sprague-Grundy}}
+Weise jedem Zustand $X$ wie folgt eine \textsc{Grundy}-Zahl $g\left(X\right)$ zu:
+\[
+g\left(X\right) := \min\left\{
+\mathbb{Z}_0^+ \setminus
+\left\{g\left(Y\right) \mid Y \text{ von } X \text{ aus direkt erreichbar}\right\}
+\right\}
+\]
+$X$ ist genau dann gewonnen, wenn $g\left(X\right) > 0$ ist.\\
+Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \textsc{Grundy}-Zahl des Gesamtzustandes $g\left(X_1\right) \oplus \ldots \oplus g\left(X_k\right)$.
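+
+Minimale Skizze zur Berechnung der \textsc{Grundy}-Zahlen per Memoisierung (Beispielannahme: ein Zug entfernt $1$ bis $3$ Steine, die Zugerzeugung ist austauschbar):
+\begin{lstlisting}
+ll grundy(ll x) { // Beispielspiel: entferne 1..3 Steine
+  static map<ll, ll> memo;
+  auto it = memo.find(x);
+  if (it != memo.end()) return it->second;
+  set<ll> seen;
+  for (ll take = 1; take <= 3 && take <= x; take++)
+    seen.insert(grundy(x - take));
+  ll mex = 0; // kleinste nicht erreichte Grundy-Zahl
+  while (seen.count(mex) > 0) mex++;
+  return memo[x] = mex;
+}
+\end{lstlisting}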
+
+\subsection{Kombinatorik}
+
+\paragraph{Wilsons Theorem}
+A number $n$ is prime if and only if
+$(n-1)!\equiv -1\bmod{n}$.\\
+($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$)
+\begin{align*}
+ (n-1)!\equiv\begin{cases}
+ -1\bmod{n},&\mathrm{falls}~n \in \mathbb{P}\\
+ \hphantom{-}2\bmod{n},&\mathrm{falls}~n = 4\\
+ \hphantom{-}0\bmod{n},&\mathrm{sonst}
+ \end{cases}
+\end{align*}
+
+\paragraph{\textsc{Zeckendorfs} Theorem}
+Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer
+verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei
+aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\
+\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch
+hineinpasst.
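+
+Greedy-Skizze (Annahme: $n \geq 1$; \code{ll} und \code{sz} wie im Template):
+\begin{lstlisting}
+vector<ll> zeckendorf(ll n) {
+  vector<ll> fib = {1, 2};
+  while (fib.back() <= n) fib.push_back(fib[sz(fib)-1] + fib[sz(fib)-2]);
+  vector<ll> res; // verwendete Fibonacci-Zahlen, absteigend
+  for (ll i = sz(fib) - 1; i >= 0; i--) {
+    if (fib[i] <= n) {res.push_back(fib[i]); n -= fib[i];}
+  }
+  return res;
+}
+\end{lstlisting}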
+
+\paragraph{\textsc{Lucas}-Theorem}
+Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung),
+so gilt
+\vspace{-0.75\baselineskip}
+\[
+ \binom{m}{n} \equiv \prod_{i=0}^k\binom{m_i}{n_i} \bmod{p}.
+\]
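+
+Skizze (Annahme: \code{calc\_binom(n, k, p)} berechnet $\binom{n}{k} \bmod p$ für $n, k < p$, z.B. wie unten bei den Binomialkoeffizienten):
+\begin{lstlisting}
+ll lucas(ll m, ll n, ll p) {
+  ll res = 1;
+  for (; n > 0 && res != 0; m /= p, n /= p)
+    res = res * calc_binom(m % p, n % p, p) % p;
+  return res;
+}
+\end{lstlisting}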
+
+%\begin{algorithm}{Binomialkoeffizienten}
+\paragraph{Binomialkoeffizienten}
+ Die Anzahl der \mbox{$k$-elementigen} Teilmengen einer \mbox{$n$-elementigen} Menge.
+
+ \begin{methods}
+ \method{precalc}{berechnet $n!$ und $n!^{-1}$ vor}{\mathit{lim}}
+ \method{calc\_binom}{berechnet Binomialkoeffizient}{1}
+ \end{methods}
+ \sourcecode{math/binomial0.cpp}
+ Falls $n \geq p$ für $\mathit{mod}=p^k$: berechne \textit{fac} und \textit{inv} wie oben, aber teile $p$ aus $i$ heraus und berechne die Häufigkeit von $p$ in $n!$ als $\sum\limits_{i=1}^{\infty}\big\lfloor\frac{n}{p^i}\big\rfloor$
+
+ \begin{methods}
+ \method{calc\_binom}{berechnet Binomialkoeffizient $(n \le 61)$}{k}
+ \end{methods}
+ \sourcecode{math/binomial1.cpp}
+
+ \begin{methods}
+ \method{calc\_binom}{berechnet Binomialkoeffizient modulo Primzahl $p$}{p-n}
+ \end{methods}
+ \sourcecode{math/binomial3.cpp}
+
+% \begin{methods}
+% \method{calc\_binom}{berechnet Primfaktoren vom Binomialkoeffizient}{n}
+% \end{methods}
+% \textbf{WICHTIG:} braucht alle Primzahlen $\leq n$
+% \sourcecode{math/binomial2.cpp}
+%\end{algorithm}
+
+\paragraph{\textsc{Catalan}-Zahlen}
+\begin{itemize}
+ \item Die \textsc{Catalan}-Zahl $C_n$ gibt an:
+ \begin{itemize}
+ \item Anzahl der Binärbäume mit $n$ nicht unterscheidbaren Knoten.
+ \item Anzahl der validen Klammerausdrücke mit $n$ Klammerpaaren.
+ \item Anzahl der korrekten Klammerungen von $n+1$ Faktoren.
+ \item Anzahl Möglichkeiten ein konvexes Polygon mit $n + 2$ Ecken zu triangulieren.
+ \item Anzahl der monotonen Pfade (zwischen gegenüberliegenden Ecken) in
+ einem $n \times n$-Gitter, die nicht die Diagonale kreuzen.
+ \end{itemize}
+\end{itemize}
+\[C_0 = 1\qquad C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} =
+\frac{1}{n + 1}\binom{2n}{n} = \frac{4n - 2}{n+1} \cdot C_{n-1}\]
+\begin{itemize}
+ \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2}
+ \item Formel $2$ und $3$ erlauben Berechnung in \runtime{n}
+\end{itemize}
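+Skizze zu Formel $3$ (Annahmen: \code{mod} prim, $n$ kleiner als \code{mod}, \code{multInv} wie oben):
+\begin{lstlisting}
+vector<ll> catalan(ll n) {
+  vector<ll> C(n + 1); C[0] = 1;
+  for (ll i = 1; i <= n; i++)
+    C[i] = (4*i - 2) % mod * C[i-1] % mod * multInv(i + 1, mod) % mod;
+  return C;
+}
+\end{lstlisting}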
+
+\paragraph{\textsc{Catalan}-Convolution}
+\begin{itemize}
+ \item Anzahl an Klammerausdrücken mit $n+k$ Klammerpaaren, die mit $(^k$ beginnen.
+\end{itemize}
+\[C^k_0 = 1\qquad C^k_n = \sum\limits_{\mathclap{a_0+a_1+\dots+a_k=n}} C_{a_0}C_{a_1}\cdots C_{a_k} =
+\frac{k+1}{n+k+1}\binom{2n+k}{n} = \frac{(2n+k-1)\cdot(2n+k)}{n(n+k+1)} \cdot C^k_{n-1}\]
+
+\paragraph{\textsc{Euler}-Zahlen 1. Ordnung}
+Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Anstiegen.
+Für die $n$-te Zahl gibt es $n$ mögliche Positionen zum Einfügen.
+Dabei wird entweder ein Anstieg in zwei gesplittet oder ein Anstieg um $n$ ergänzt.
+\[\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \quad
+\eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1}=
+\sum_{i=0}^{k} (-1)^i\binom{n+1}{i}(k+1-i)^n\]
+\begin{itemize}
+ \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2}
+ \item Formel $2$ erlaubt Berechnung in \runtime{n\log(n)}
+\end{itemize}
+
+\paragraph{\textsc{Euler}-Zahlen 2. Ordnung}
+Die Anzahl der Permutationen von $\{1,1, \ldots, n,n\}$ mit genau $k$ Anstiegen.
+\[\eulerII{n}{0} = 1 \qquad\eulerII{n}{n} = 0 \qquad\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}\]
+\begin{itemize}
+ \item Formel erlaubt Berechnung ohne Division in \runtime{n^2}
+\end{itemize}
+
+\paragraph{\textsc{Stirling}-Zahlen 1. Ordnung}
+Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Zyklen.
+Es gibt zwei Möglichkeiten für die $n$-te Zahl: Entweder bildet sie einen eigenen Zyklus, oder sie kann an jeder Position in jedem Zyklus einsortiert werden.
+\[\stirlingI{0}{0} = 1 \qquad
+\stirlingI{n}{0} = \stirlingI{0}{n} = 0 \qquad
+\stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}\]
+\begin{itemize}
+ \item Formel erlaubt Berechnung ohne Division in \runtime{n^2}
+\end{itemize}
+\[\sum_{k=0}^{n}\pm\stirlingI{n}{k}x^k=x(x-1)(x-2)\cdots(x-n+1)\]
+\begin{itemize}
+ \item Berechne das Polynom mit FFT und benutze den Betrag der Koeffizienten: \runtime{n\log(n)^2} (nur ungefähr gleich große Polynome zusammen multiplizieren, beginnend mit $x-k$)
+\end{itemize}
+
+\paragraph{\textsc{Stirling}-Zahlen 2. Ordnung}
+Die Anzahl der Möglichkeiten $n$ Elemente in $k$ nichtleere Teilmengen zu zerlegen.
+Es gibt $k$ Möglichkeiten die $n$ in eine $n-1$-Partition einzuordnen.
+Dazu kommt der Fall, dass die $n$ in ihrer eigenen Teilmenge (alleine) steht.
+\[\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad
+\stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} =
+\frac{1}{k!} \sum\limits_{i=0}^{k} (-1)^{k-i}\binom{k}{i}i^n\]
+\begin{itemize}
+ \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2}
+ \item Formel $2$ erlaubt Berechnung in \runtime{n\log(n)}
+\end{itemize}
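+DP-Skizze zu Formel $1$ (modulo \code{mod}):
+\begin{lstlisting}
+vector<vector<ll>> stirling2(ll n) {
+  vector<vector<ll>> S(n + 1, vector<ll>(n + 1));
+  S[0][0] = 1;
+  for (ll i = 1; i <= n; i++) for (ll k = 1; k <= i; k++)
+    S[i][k] = (k * S[i-1][k] + S[i-1][k-1]) % mod;
+  return S;
+}
+\end{lstlisting}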
+
+\paragraph{\textsc{Bell}-Zahlen}
+Anzahl der Partitionen von $\{1, \ldots, n\}$.
+Wie \textsc{Stirling}-Zahlen 2. Ordnung ohne Limit durch $k$.
+\[B_1 = 1 \qquad
+B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k}
+= \sum\limits_{k = 0}^{n}\stirlingII{n}{k}\qquad\qquad B_{p^m+n}\equiv m\cdot B_n + B_{n+1} \bmod{p}\]
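+Skizze über das \textsc{Bell}-Dreieck (ohne Binomialkoeffizienten, modulo \code{mod}):
+\begin{lstlisting}
+vector<ll> bell(ll n) {
+  vector<ll> row = {1}, B = {1}; // B[i] = i-te Bell-Zahl
+  for (ll i = 1; i <= n; i++) {
+    vector<ll> next = {row.back()};
+    for (ll x : row) next.push_back((next.back() + x) % mod);
+    row = next;
+    B.push_back(row[0]);
+  }
+  return B;
+}
+\end{lstlisting}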
+
+\paragraph{Partitions}
+Die Anzahl der Partitionen von $n$ in genau $k$ positive Summanden.
+Die Anzahl der Partitionen von $n$ mit Elementen aus $\{1,\dots,k\}$.
+\begin{align*}
+ p_0(0)=1 \qquad p_k(n)&=0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0\\
+ p_k(n)&= p_k(n-k) + p_{k-1}(n-1)\\[2pt]
+ p(n)&=\sum_{k=1}^{n} p_k(n)=p_n(2n)=\sum\limits_{k\neq0}^\infty(-1)^{k+1}p\bigg(n - \frac{k(3k-1)}{2}\bigg)
+\end{align*}
+\begin{itemize}
+ \item in Formel $3$ kann abgebrochen werden wenn $\frac{k(3k-1)}{2} > n$.
+ \item Die Anzahl der Partitionen von $n$ in bis zu $k$ positive Summanden ist $\sum\limits_{i=0}^{k}p_i(n)=p_k(n+k)$.
+\end{itemize}
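+DP-Skizze zur Rekurrenz (modulo \code{mod}; \code{p[i][k]} entspricht $p_k(i)$):
+\begin{lstlisting}
+vector<vector<ll>> partitions(ll n) {
+  vector<vector<ll>> p(n + 1, vector<ll>(n + 1));
+  p[0][0] = 1;
+  for (ll i = 1; i <= n; i++) for (ll k = 1; k <= i; k++)
+    p[i][k] = (p[i-k][k] + p[i-1][k-1]) % mod;
+  return p;
+}
+\end{lstlisting}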
+
+\subsection{The Twelvefold Way \textnormal{(verteile $n$ Bälle auf $k$ Boxen)}}
+\input{math/tables/twelvefold}
+
+\optional{
+\subsection{Primzahlzählfunktion $\boldsymbol{\pi}$}
+\begin{methods}
+ \method{init}{berechnet $\pi$ bis $N$}{N\*\log(\log(N))}
+ \method{phi}{zählt zu $p_i$ teilerfremde Zahlen $\leq n$ für alle $i \leq k$}{???}
+ \method{pi}{zählt Primzahlen $\leq n$ ($n < N^2$)}{n^{2/3}}
+\end{methods}
+\sourcecode{math/piLehmer.cpp}
+}
+
+%\input{math/tables/numbers}
+
+\begin{algorithm}[optional]{Big Integers}
+ \sourcecode{math/bigint.cpp}
+\end{algorithm}
diff --git a/content/math/matrixPower.cpp b/content/math/matrixPower.cpp
new file mode 100644
index 0000000..d981e6e
--- /dev/null
+++ b/content/math/matrixPower.cpp
@@ -0,0 +1,14 @@
+vector<mat> pows;
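+// pows[i] = m^(2^(i-1)) für i >= 1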
+
+void precalc(mat m) {
+ pows = {mat(sz(m.m), 1), m};
+ for (int i = 1; i < 60; i++) pows.push_back(pows[i] * pows[i]);
+}
+
+auto calc(ll b, vector<ll> v) {
+ for (ll i = 1; b > 0; i++) {
+ if (b & 1) v = pows[i] * v;
+ b /= 2;
+ }
+ return v;
+}
diff --git a/content/math/millerRabin.cpp b/content/math/millerRabin.cpp
new file mode 100644
index 0000000..cb27d29
--- /dev/null
+++ b/content/math/millerRabin.cpp
@@ -0,0 +1,19 @@
+constexpr ll bases32[] = {2, 7, 61};
+constexpr ll bases64[] = {2, 325, 9375, 28178, 450775,
+ 9780504, 1795265022};
+bool isPrime(ll n) {
+ if (n < 2 || n % 2 == 0) return n == 2;
+ ll d = n - 1, j = 0;
+ while (d % 2 == 0) d /= 2, j++;
+ for (ll a : bases64) {
+ if (a % n == 0) continue;
+ ll v = powMod(a, d, n); //with mulmod or int128
+ if (v == 1 || v == n - 1) continue;
+ for (ll i = 1; i <= j; i++) {
+ v = ((lll)v * v) % n;
+ if (v == n - 1 || v <= 1) break;
+ }
+ if (v != n - 1) return false;
+ }
+ return true;
+}
diff --git a/content/math/modExp.cpp b/content/math/modExp.cpp
new file mode 100644
index 0000000..2329a94
--- /dev/null
+++ b/content/math/modExp.cpp
@@ -0,0 +1,6 @@
+ll powMod(ll a, ll b, ll n) {
+ if(b == 0) return 1;
+ if(b == 1) return a % n;
+ if(b & 1) return (powMod(a, b - 1, n) * a) % n;
+ else return powMod((a * a) % n, b / 2, n);
+}
diff --git a/content/math/modMulIterativ.cpp b/content/math/modMulIterativ.cpp
new file mode 100644
index 0000000..611f09a
--- /dev/null
+++ b/content/math/modMulIterativ.cpp
@@ -0,0 +1,9 @@
+ll mulMod(ll a, ll b, ll n) {
+ ll res = 0;
+ while (b > 0) {
+ if (b & 1) res = (a + res) % n;
+ a = (a * 2) % n;
+ b /= 2;
+ }
+ return res;
+}
diff --git a/content/math/modPowIterativ.cpp b/content/math/modPowIterativ.cpp
new file mode 100644
index 0000000..0dc3fb1
--- /dev/null
+++ b/content/math/modPowIterativ.cpp
@@ -0,0 +1,9 @@
+ll powMod(ll a, ll b, ll n) {
+ ll res = 1;
+ while (b > 0) {
+ if (b & 1) res = (a * res) % n;
+ a = (a * a) % n;
+ b /= 2;
+ }
+ return res;
+}
diff --git a/content/math/multInv.cpp b/content/math/multInv.cpp
new file mode 100644
index 0000000..647dc2d
--- /dev/null
+++ b/content/math/multInv.cpp
@@ -0,0 +1,4 @@
+ll multInv(ll x, ll m) {
+ auto [d, a, b] = extendedEuclid(x, m); // Implementierung von oben.
+ return ((a % m) + m) % m;
+}
diff --git a/content/math/permIndex.cpp b/content/math/permIndex.cpp
new file mode 100644
index 0000000..4cffc12
--- /dev/null
+++ b/content/math/permIndex.cpp
@@ -0,0 +1,13 @@
+ll permIndex(vector<ll> v) {
+ Tree<ll> t;
+ reverse(all(v));
+ for (ll& x : v) {
+ t.insert(x);
+ x = t.order_of_key(x);
+ }
+ ll res = 0;
+ for (int i = sz(v); i > 0; i--) {
+ res = res * i + v[i - 1];
+ }
+ return res;
+}
diff --git a/content/math/piLegendre.cpp b/content/math/piLegendre.cpp
new file mode 100644
index 0000000..21b974b
--- /dev/null
+++ b/content/math/piLegendre.cpp
@@ -0,0 +1,23 @@
+constexpr ll cache = 500; // requires O(cache^3)
+vector<vector<ll>> memo(cache * cache, vector<ll>(cache));
+
+ll pi(ll n);
+
+ll phi(ll n, ll k) {
+ if (n <= 1 || k < 0) return 0;
+ if (n <= primes[k]) return n - 1;
+ if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k;
+ bool ok = n < cache * cache;
+ if (ok && memo[n][k] > 0) return memo[n][k];
+ ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1);
+ if (ok) memo[n][k] = res;
+ return res;
+}
+
+ll pi(ll n) {
+ if (n < N) { // implement this as O(1) lookup for speedup!
+ return distance(primes.begin(), upper_bound(all(primes), n));
+ } else {
+ ll k = pi(sqrtl(n) + 1);
+ return n - phi(n, k) + k;
+}}
diff --git a/content/math/piLehmer.cpp b/content/math/piLehmer.cpp
new file mode 100644
index 0000000..17df85e
--- /dev/null
+++ b/content/math/piLehmer.cpp
@@ -0,0 +1,52 @@
+constexpr ll cacheA = 2 * 3 * 5 * 7 * 11 * 13 * 17;
+constexpr ll cacheB = 7;
+ll memoA[cacheA + 1][cacheB + 1];
+ll memoB[cacheB + 1];
+ll memoC[N];
+
+void init() {
+ primeSieve(); // @\sourceref{math/primeSieve.cpp}@
+ for (ll i = 1; i < N; i++) { // i=1: vermeidet Zugriff auf memoC[-1]
+ memoC[i] = memoC[i - 1];
+ if (isPrime(i)) memoC[i]++;
+ }
+ memoB[0] = 1;
+ for(ll i = 0; i <= cacheA; i++) memoA[i][0] = i;
+ for(ll i = 1; i <= cacheB; i++) {
+ memoB[i] = primes[i - 1] * memoB[i - 1];
+ for(ll j = 1; j <= cacheA; j++) {
+ memoA[j][i] = memoA[j][i - 1] - memoA[j /
+ primes[i - 1]][i - 1];
+}}}
+
+ll phi(ll n, ll k) {
+ if(k == 0) return n;
+ if(k <= cacheB)
+ return memoA[n % memoB[k]][k] +
+ (n / memoB[k]) * memoA[memoB[k]][k];
+ if(n <= primes[k - 1]*primes[k - 1]) return memoC[n] - k + 1;
+ if(n <= primes[k - 1]*primes[k - 1]*primes[k - 1] && n < N) {
+ ll b = memoC[(ll)sqrtl(n)];
+ ll res = memoC[n] - (b + k - 2) * (b - k + 1) / 2;
+ for(ll i = k; i < b; i++) res += memoC[n / primes[i]];
+ return res;
+ }
+ return phi(n, k - 1) - phi(n / primes[k - 1], k - 1);
+}
+
+ll pi(ll n) {
+ if (n < N) return memoC[n];
+ ll a = pi(sqrtl(sqrtl(n)));
+ ll b = pi(sqrtl(n));
+ ll c = pi(cbrtl(n));
+ ll res = phi(n, a) + (b + a - 2) * (b - a + 1) / 2;
+ for (ll i = a; i < b; i++) {
+ ll w = n / primes[i];
+ res -= pi(w);
+ if (i > c) continue;
+ ll bi = pi(sqrtl(w));
+ for (ll j = i; j < bi; j++) {
+ res -= pi(w / primes[j]) - j;
+ }}
+ return res;
+}
diff --git a/content/math/polynomial.cpp b/content/math/polynomial.cpp
new file mode 100644
index 0000000..44f6207
--- /dev/null
+++ b/content/math/polynomial.cpp
@@ -0,0 +1,65 @@
+struct poly {
+ vector<ll> data;
+
+ poly(int deg = 0) : data(max(1, deg)) {}
+ poly(initializer_list<ll> _data) : data(_data) {}
+
+ int size() const {return sz(data);}
+
+ void trim() {
+ for (ll& x : data) x = (x % mod + mod) % mod;
+ while (size() > 1 && data.back() == 0) data.pop_back();
+ }
+
+ ll& operator[](int x) {return data[x];}
+ const ll& operator[](int x) const {return data[x];}
+
+ ll operator()(int x) const {
+ ll res = 0;
+ for (int i = size() - 1; i >= 0; i--)
+ res = (res * x + data[i]) % mod;
+ return res % mod;
+ }
+
+ poly& operator+=(const poly& o) {
+ if (size() < o.size()) data.resize(o.size());
+ for (int i = 0; i < o.size(); i++)
+ data[i] = (data[i] + o[i]) % mod;
+ return *this;
+ }
+
+ poly operator*(const poly& o) const {
+ poly res(size() + o.size() - 1);
+ for (int i = 0; i < size(); i++) {
+ for (int j = 0; j < o.size(); j++) {
+ res[i + j] += (data[i] * o[j]) % mod;
+ }}
+ res.trim();
+ return res;
+ }
+
+ //return p(x+a)
+ poly operator<<(ll a) const {
+ poly res(size());
+ for (int i = size() - 1; i >= 0; i--) {
+ for (int j = size() - i - 1; j >= 1; j--)
+ res[j] = (res[j] * a + res[j - 1]) % mod;
+ res[0] = (res[0] * a + res[i]) % mod;
+ }
+ return res;
+ }
+
+ pair<poly, poly> divmod(const poly& d) const {
+ int i = size() - d.size();
+ poly s(i + 1), r = *this;
+ ll inv = multInv(d.data.back(), mod);
+ for (; i >= 0; i--) {
+ s[i] = (r.data.back() * inv) % mod;
+ r.data.pop_back();
+ for (int j = 0; i + j < r.size(); j++) {
+ r[i + j] = (r.data[i + j] - s[i] * d[j]) % mod;
+ }}
+ s.trim(); r.trim();
+ return {s, r};
+ }
+};
diff --git a/content/math/primeSieve.cpp b/content/math/primeSieve.cpp
new file mode 100644
index 0000000..1b0f514
--- /dev/null
+++ b/content/math/primeSieve.cpp
@@ -0,0 +1,16 @@
+constexpr ll N = 100'000'000;
+bitset<N / 2> isNotPrime;
+vector<ll> primes = {2};
+
+bool isPrime(ll x) {
+ if (x < 2 || x % 2 == 0) return x == 2;
+ else return !isNotPrime[x / 2];
+}
+
+void primeSieve() {
+ for (ll i = 3; i < N; i += 2) {// i * i < N suffices for isPrime
+ if (!isNotPrime[i / 2]) {
+ primes.push_back(i); // optional
+ for (ll j = i * i; j < N; j+= 2 * i) {
+ isNotPrime[j / 2] = 1;
+}}}}
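+// Usage sketch: primeSieve(); then isPrime(x) for x < N,
+// primes[] holds all sieved primes in increasing order.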
diff --git a/content/math/primitiveRoot.cpp b/content/math/primitiveRoot.cpp
new file mode 100644
index 0000000..39a0f64
--- /dev/null
+++ b/content/math/primitiveRoot.cpp
@@ -0,0 +1,23 @@
+bool isPrimitive(ll g, ll n, ll phi, map<ll, int>& phiFacts) {
+ if (g == 1) return n == 2;
+ if (gcd(g, n) > 1) return false;
+ for (auto [f, _] : phiFacts)
+ if (powMod(g, phi / f, n) == 1) return false;
+ return true;
+}
+
+bool isPrimitive(ll g, ll n) {
+ ll phin = phi(n); //isPrime(n) => phi(n) = n - 1
+ map<ll, int> phiFacts;
+ factor(phin, phiFacts);
+ return isPrimitive(g, n, phin, phiFacts);
+}
+
+ll findPrimitive(ll n) { // testing for existence first is faster
+ ll phin = phi(n); //isPrime(n) => phi(n) = n - 1
+ map<ll, int> phiFacts;
+ factor(phin, phiFacts);
+ for (ll res = 1; res < n; res++) // or try in random order
+ if (isPrimitive(res, n, phin, phiFacts)) return res;
+ return -1;
+}
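+
+// Example (assumes phi() = Euler totient and factor() are available):
+//   findPrimitive(7) == 3, since 3^1..3^6 mod 7 = 3,2,6,4,5,1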
diff --git a/content/math/rho.cpp b/content/math/rho.cpp
new file mode 100644
index 0000000..ad640cd
--- /dev/null
+++ b/content/math/rho.cpp
@@ -0,0 +1,19 @@
+using lll = __int128;
+ll rho(ll n) { // Finds a factor < n, not necessarily prime.
+ if (n % 2 == 0) return 2;
+ ll x = 0, y = 0, prd = 2, i = n/2 + 7;
+ auto f = [&](lll c){return (c * c + i) % n;};
+ for (ll t = 30; t % 40 || gcd(prd, n) == 1; t++) {
+ if (x == y) x = ++i, y = f(x);
+ if (ll q = (lll)prd * abs(x-y) % n; q) prd = q;
+ x = f(x); y = f(f(y));
+ }
+ return gcd(prd, n);
+}
+
+void factor(ll n, map<ll, int>& facts) {
+ if (n == 1) return;
+ if (isPrime(n)) {facts[n]++; return;}
+ ll f = rho(n);
+ factor(n / f, facts); factor(f, facts);
+}
diff --git a/content/math/shortModInv.cpp b/content/math/shortModInv.cpp
new file mode 100644
index 0000000..f696cce
--- /dev/null
+++ b/content/math/shortModInv.cpp
@@ -0,0 +1,3 @@
+ll multInv(ll x, ll m) { // x^{-1} mod m
+ return 1 < x ? m - multInv(m % x, x) * m / x : 1;
+}
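+// Example: multInv(3, 7) == 5, since 3 * 5 = 15 == 1 (mod 7).
+// Requires 0 < x < m and gcd(x, m) == 1.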
diff --git a/content/math/simpson.cpp b/content/math/simpson.cpp
new file mode 100644
index 0000000..7f237a4
--- /dev/null
+++ b/content/math/simpson.cpp
@@ -0,0 +1,12 @@
+//double f(double x) {return x;}
+
+double simps(double a, double b) {
+ return (f(a) + 4.0 * f((a + b) / 2.0) + f(b)) * (b - a) / 6.0;
+}
+
+double integrate(double a, double b) {
+ double m = (a + b) / 2.0;
+ double l = simps(a, m), r = simps(m, b), tot = simps(a, b);
+ if (abs(l + r - tot) < EPS) return tot;
+ return integrate(a, m) + integrate(m, b);
+}
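+
+// Usage sketch (assumes a global f and EPS are defined, e.g.):
+//   double f(double x) {return x * x;}
+//   constexpr double EPS = 1e-9;
+//   integrate(0, 3);  // ~9.0 (Simpson is exact up to degree 3)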
diff --git a/content/math/sqrtModCipolla.cpp b/content/math/sqrtModCipolla.cpp
new file mode 100644
index 0000000..1fac0c5
--- /dev/null
+++ b/content/math/sqrtModCipolla.cpp
@@ -0,0 +1,14 @@
+ll sqrtMod(ll a, ll p) {// use legendre to test whether a solution exists
+ if (a < 2) return a;
+ ll t = 0;
+ while (legendre((t*t-4*a) % p, p) >= 0) t = rng() % p;
+ ll b = -t, c = -t, d = 1, m = p;
+ for (m++; m /= 2; b = (a+a-b*b) % p, a = (a*a) % p) {
+ if (m % 2) {
+ d = (c-d*b) % p;
+ c = (c*a) % p;
+ } else {
+ c = (d*a - c*b) % p;
+ }}
+ return (d + p) % p;
+}
diff --git a/content/math/squfof.cpp b/content/math/squfof.cpp
new file mode 100644
index 0000000..1cb97de
--- /dev/null
+++ b/content/math/squfof.cpp
@@ -0,0 +1,89 @@
+using lll = __int128;
+
+constexpr lll multipliers[] = {1, 3, 5, 7,
+ 11, 3*5, 3*7, 3*11,
+ 5*7, 5*11, 7*11,
+ 3*5*7, 3*5*11, 3*7*11,
+ 5*7*11, 3*5*7*11};
+
+lll root(lll x) {
+ lll r = sqrtl(x);
+ while(r*r < x) r++;
+ while(r*r > x) r--;
+ return r;
+}
+
+lll croot(lll x) {
+ lll r = cbrtl(x);
+ while(r*r*r < x) r++;
+ while(r*r*r > x) r--;
+ return r;
+}
+
+lll squfof(lll N) {
+ lll s = croot(N);
+ if (s*s*s == N) return s;
+ s = root(N);
+ if (s*s == N) return s;
+ for (lll k : multipliers) {
+ lll D = k * N;
+ lll Po, P, Pprev, q, b, r, i;
+ Po = Pprev = P = root(D);
+ lll Qprev = 1;
+ lll Q = D - Po*Po;
+ lll L = 2 * root(2 * s);
+ lll B = 3 * L;
+ for (i = 2; i < B; i++) {
+ b = (Po + P) / Q;
+ P = b*Q - P;
+ q = Q;
+ Q = Qprev + b * (Pprev - P);
+ r = root(Q);
+ if (!(i & 1) && r*r == Q) break;
+ Qprev = q;
+ Pprev = P;
+ }
+ if (i >= B) continue;
+ b = (Po - P) / r;
+ Pprev = P = b*r + P;
+ Qprev = r;
+ Q = (D-Pprev*Pprev)/Qprev;
+ i = 0;
+ do {
+ b = (Po + P) / Q;
+ Pprev = P;
+ P = b*Q - P;
+ q = Q;
+ Q = Qprev + b * (Pprev - P);
+ Qprev = q;
+ i++;
+ } while(P != Pprev);
+ r = gcd(N, Qprev);
+ if (r != 1 && r != N) return r;
+ }
+ exit(1);//try fallback to pollard rho
+}
+
+constexpr lll trialLim = 5'000;
+
+void factor(lll n, map<lll, int>& facts) {
+ for (lll i = 2; i * i <= n && i <= trialLim; i++) {
+ while (n % i == 0) {
+ facts[i]++;
+ n /= i;
+ }}
+ if (n > 1 && n < trialLim * trialLim) {
+ facts[n]++;
+ } else {
+ vector<lll> todo = {n};
+ while (!todo.empty()) {
+ lll c = todo.back();
+ todo.pop_back();
+ if (c == 1) continue;
+ if (isPrime(c)) {
+ facts[c]++;
+ } else {
+ lll d = squfof(c);
+ todo.push_back(d);
+ todo.push_back(c / d);
+}}}}
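+
+// Usage sketch (assumes an isPrime() that also works for large values,
+// e.g. Miller-Rabin):
+//   map<lll, int> facts;
+//   factor(720, facts);  // facts = {2: 4, 3: 2, 5: 1}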
diff --git a/content/math/tables.tex b/content/math/tables.tex
new file mode 100644
index 0000000..53f3758
--- /dev/null
+++ b/content/math/tables.tex
@@ -0,0 +1,18 @@
+\enlargethispage{0.2cm}
+\begin{multicols*}{2}
+ \input{math/tables/binom}
+ \vfill
+ \input{math/tables/composite}
+ \vfill
+ \input{math/tables/platonic}
+ \vfill
+ \input{math/tables/series}
+
+ \columnbreak
+
+ \input{math/tables/probability}
+ \vfill
+ \input{math/tables/stuff}
+ \vfill
+ \input{math/tables/nim}
+\end{multicols*}
diff --git a/content/math/tables/binom.tex b/content/math/tables/binom.tex
new file mode 100644
index 0000000..878a6b0
--- /dev/null
+++ b/content/math/tables/binom.tex
@@ -0,0 +1,28 @@
+\begin{tabularx}{\linewidth}{|XXXX|}
+ \hline
+ \multicolumn{4}{|c|}{Binomialkoeffizienten} \\
+ \hline
+ \multicolumn{4}{|c|}{
+ $\frac{n!}{k!(n - k)!} \hfill=\hfill
+ \binom{n}{k} \hfill=\hfill
+ \binom{n}{n - k} \hfill=\hfill
+ \frac{n}{k}\binom{n - 1}{k - 1} \hfill=\hfill
+ \frac{n-k+1}{k}\binom{n}{k - 1} \hfill=\hfill
+ \binom{n - 1}{k} + \binom{n - 1}{k - 1} \hfill=\hfill
+ (-1)^k \binom{k - n - 1}{k} \hfill\approx\hfill
+ 2^{n} \cdot \frac{2}{\sqrt{2\pi n}}\cdot\exp\left(-\frac{2(k - \frac{n}{2})^2}{n}\right)$
+ } \\
+ \grayhline
+
+ $\sum\limits_{k = 0}^n \binom{n}{k} = 2^n$ &
+ $\sum\limits_{k = 0}^n \binom{k}{m} = \binom{n + 1}{m + 1}$ &
+ $\sum\limits_{i = 0}^n \binom{n}{i}^2 = \binom{2n}{n}$ &
+ $\sum\limits_{k = 0}^n\binom{r + k}{k} = \binom{r + n + 1}{n}$\\
+
+ $\binom{n}{m}\binom{m}{k} = \binom{n}{k}\binom{n - k}{m - k}$ &
+ $\sum\limits_{k = 0}^n \binom{r}{k}\binom{s}{n - k} = \binom{r + s}{n}$ &
+ \multicolumn{2}{l|}{
+ $\sum\limits_{i = 1}^n \binom{n}{i} F_i = F_{2n} \quad F_n = n\text{-th Fib.}$
+ }\\
+ \hline
+\end{tabularx}
diff --git a/content/math/tables/composite.tex b/content/math/tables/composite.tex
new file mode 100644
index 0000000..c261db1
--- /dev/null
+++ b/content/math/tables/composite.tex
@@ -0,0 +1,27 @@
+
+\begin{tabularx}{\linewidth}{|r||r||r|r||r|r|r||C|}
+ \hline
+ \multicolumn{8}{|c|}{Important Numbers} \\
+ \hline
+ $10^x$ & Highly Composite & \# Divs & $<$ Prime & $>$ Prime & \# Primes & primorial & \\
+ \hline
+ 1 & 6 & 4 & $-3$ & $+1$ & 4 & 2 & \\
+ 2 & 60 & 12 & $-3$ & $+1$ & 25 & 3 & \\
+ 3 & 840 & 32 & $-3$ & $+9$ & 168 & 4 & \\
+ 4 & 7\,560 & 64 & $-27$ & $+7$ & 1\,229 & 5 & \\
+ 5 & 83\,160 & 128 & $-9$ & $+3$ & 9\,592 & 6 & \\
+ 6 & 720\,720 & 240 & $-17$ & $+3$ & 78\,498 & 7 & \\
+ 7 & 8\,648\,640 & 448 & $-9$ & $+19$ & 664\,579 & 8 & \\
+ 8 & 73\,513\,440 & 768 & $-11$ & $+7$ & 5\,761\,455 & 8 & \\
+ 9 & 735\,134\,400 & 1\,344 & $-63$ & $+7$ & 50\,847\,534 & 9 & \\
+ 10 & 6\,983\,776\,800 & 2\,304 & $-33$ & $+19$ & 455\,052\,511 & 10 & \\
+ 11 & 97\,772\,875\,200 & 4\,032 & $-23$ & $+3$ & 4\,118\,054\,813 & 10 & \\
+ 12 & 963\,761\,198\,400 & 6\,720 & $-11$ & $+39$ & 37\,607\,912\,018 & 11 & \\
+ 13 & 9\,316\,358\,251\,200 & 10\,752 & $-29$ & $+37$ & 346\,065\,536\,839 & 12 & \\
+ 14 & 97\,821\,761\,637\,600 & 17\,280 & $-27$ & $+31$ & 3\,204\,941\,750\,802 & 12 & \\
+ 15 & 866\,421\,317\,361\,600 & 26\,880 & $-11$ & $+37$ & 29\,844\,570\,422\,669 & 13 & \\
+ 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & $-63$ & $+61$ & 279\,238\,341\,033\,925 & 13 & \\
+ 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & $-3$ & $+3$ & 2\,623\,557\,157\,654\,233 & 14 & \\
+ 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & $-11$ & $+3$ & 24\,739\,954\,287\,740\,860 & 15 & \\
+ \hline
+\end{tabularx}
diff --git a/content/math/tables/nim.tex b/content/math/tables/nim.tex
new file mode 100644
index 0000000..8490d42
--- /dev/null
+++ b/content/math/tables/nim.tex
@@ -0,0 +1,96 @@
+\begin{tabularx}{\linewidth}{|p{0.37\linewidth}|X|}
+ \hline
+ \multicolumn{2}{|c|}{Nim-Spiele (\ding{182} letzter gewinnt (normal), \ding{183} letzter verliert)} \\
+ \hline
+ Beschreibung &
+ Strategie \\
+ \hline
+
+ $M = [\mathit{pile}_i]$\newline
+ $[x] := \{1, \ldots, x\}$&
+ $\mathit{SG} = \oplus_{i = 1}^n \mathit{pile}_i$\newline
+ \ding{182} Nimm von einem Stapel, sodass $\mathit{SG}$ $0$ wird.\newline
+ \ding{183} Genauso.
+ Außer: Bleiben nur noch Stapel der Größe $1$, erzeuge ungerade Anzahl solcher Stapel.\\
+ \hline
+
+ $M = \{a^m \mid m \geq 0\}$ &
+ $a$ ungerade: $\mathit{SG}_n = n \% 2$\newline
+ $a$ gerade:\newline
+ $\mathit{SG}_n = 2$, falls $n \equiv a \bmod (a + 1) $\newline
+ $\mathit{SG}_n = n \% (a + 1) \% 2$, sonst.\\
+ \hline
+
+ $M_{\text{\ding{172}}} = \left[\frac{\mathit{pile}_i}{2}\right]$\newline
+ $M_{\text{\ding{173}}} =
+ \left\{\left\lceil\frac{\mathit{pile}_i}{2}\right\rceil,~
+ \mathit{pile}_i\right\}$ &
+ \ding{172}
+ $\mathit{SG}_{2n} = n$,
+ $\mathit{SG}_{2n+1} = \mathit{SG}_n$\newline
+ \ding{173}
+ $\mathit{SG}_0 = 0$,
+ $\mathit{SG}_n = [\log_2 n] + 1$ \\
+ \hline
+
+ $M_{\text{\ding{172}}} = \text{Teiler von $\mathit{pile}_i$}$\newline
+ $M_{\text{\ding{173}}} = \text{echte Teiler von $\mathit{pile}_i$}$ &
+ \ding{172}
+ $\mathit{SG}_0 = 0$,
+ $\mathit{SG}_n = \mathit{SG}_{\text{\ding{173},n}} + 1$\newline
+ \ding{173}
+ $\mathit{SG}_1 = 0$,
+ $\mathit{SG}_n = \text{\#Nullen am Ende von $n_{bin}$}$\\
+ \hline
+
+ $M_{\text{\ding{172}}} = [k]$\newline
+ $M_{\text{\ding{173}}} = S$, ($S$ endlich)\newline
+ $M_{\text{\ding{174}}} = S \cup \{\mathit{pile}_i\}$ &
+ $\mathit{SG}_{\text{\ding{172}}, n} = n \bmod (k + 1)$\newline
+ \ding{182} Niederlage bei $\mathit{SG} = 0$\newline
+ \ding{183} Niederlage bei $\mathit{SG} = 1$\newline
+ $\mathit{SG}_{\text{\ding{174}}, n} = \mathit{SG}_{\text{\ding{173}}, n} + 1$\\
+ \hline
+
+ \multicolumn{2}{|l|}{
+ Für jedes endliche $M$ ist $\mathit{SG}$ eines Stapels irgendwann periodisch.
+ } \\
+ \hline
+
+ \textsc{Moore}'s Nim:\newline
+ Beliebige Zahl von maximal $k$ Stapeln. &
+ \ding{182}
+ Schreibe $\mathit{pile}_i$ binär.
+ Addiere ohne Übertrag zur Basis $k + 1$.
+ Niederlage, falls Ergebnis gleich 0.\newline
+ \ding{183}
+ Wenn alle Stapel $1$ sind:
+ Niederlage, wenn $n \equiv 1 \bmod (k + 1)$.
+ Sonst wie in \ding{182}.\\
+ \hline
+
+ Staircase Nim:\newline
+ $n$ Stapel in einer Reihe.
+ Beliebige Zahl von Stapel $i$ nach Stapel $i-1$. &
+ Niederlage, wenn Nim der ungeraden Spiele verloren ist:\newline
+ $\oplus_{i = 0}^{(n - 1) / 2} \mathit{pile}_{2i + 1} = 0$\\
+ \hline
+
+ \textsc{Lasker}'s Nim:\newline
+ Zwei mögliche Züge:\newline
+ 1) Nehme beliebige Zahl.\newline
+ 2) Teile Stapel in zwei Stapel (ohne Entnahme).&
+ $\mathit{SG}_n = n$, falls $n \equiv 1,2 \bmod 4$\newline
+ $\mathit{SG}_n = n + 1$, falls $n \equiv 3 \bmod 4$\newline
+ $\mathit{SG}_n = n - 1$, falls $n \equiv 0 \bmod 4$\\
+ \hline
+
+ \textsc{Kayles}' Nim:\newline
+ Zwei mögliche Züge:\newline
+ 1) Nehme beliebige Zahl.\newline
+ 2) Teile Stapel in zwei Stapel (mit Entnahme).&
+ Berechne $\mathit{SG}_n$ für kleine $n$ rekursiv.\newline
+ $n \in [72,83]: \quad 4, 1, 2, 8, 1, 4, 7, 2, 1, 8, 2, 7$\newline
+ Periode ab $n = 72$ der Länge $12$.\\
+ \hline
+\end{tabularx}
diff --git a/content/math/tables/numbers.tex b/content/math/tables/numbers.tex
new file mode 100644
index 0000000..1dc9f38
--- /dev/null
+++ b/content/math/tables/numbers.tex
@@ -0,0 +1,59 @@
+\begin{expandtable}
+\begin{tabularx}{\linewidth}{|l|X|}
+ \hline
+ \multicolumn{2}{|c|}{Berühmte Zahlen} \\
+ \hline
+ \textsc{Fibonacci} &
+ $f(0) = 0 \quad
+ f(1) = 1 \quad
+ f(n+2) = f(n+1) + f(n)$ \\
+ \grayhline
+
+ \textsc{Catalan} &
+ $C_0 = 1 \qquad
+ C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} =
+ \frac{1}{n + 1}\binom{2n}{n} = \frac{2(2n - 1)}{n+1} \cdot C_{n-1}$ \\
+ \grayhline
+
+ \textsc{Euler} I &
+ $\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \qquad
+ \eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1} $ \\
+ \grayhline
+
+ \textsc{Euler} II &
+ $\eulerII{n}{0} = 1 \quad
+ \eulerII{n}{n} = 0 \quad$\\
+ & $\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}$ \\
+ \grayhline
+
+ \textsc{Stirling} I &
+ $\stirlingI{0}{0} = 1 \qquad
+ \stirlingI{n}{0} = \stirlingI{0}{n} = 0 \qquad
+ \stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}$ \\
+ \grayhline
+
+ \textsc{Stirling} II &
+ $\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad
+ \stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} =
+ \frac{1}{k!} \sum\limits_{j=0}^{k} (-1)^{k-j}\binom{k}{j}j^n$\\
+ \grayhline
+
+ \textsc{Bell} &
+ $B_0 = B_1 = 1 \qquad
+ B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k}
+ = \sum\limits_{k = 0}^{n}\stirlingII{n}{k}$\\
+ \grayhline
+
+ \textsc{Partitions} &
+ $p(0,0) = 1 \quad
+ p(n,k) = 0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0$ \\
+ & $p(n,k) = p(n-k,k) + p(n-1,k-1)$\\
+ \grayhline
+
+ \textsc{Partitions} &
+ $f(0) = 1 \quad f(n) = 0~(n < 0)$ \\
+ & $f(n)=\sum\limits_{k=1}^\infty(-1)^{k-1}f(n - \frac{k(3k+1)}{2})+\sum\limits_{k=1}^\infty(-1)^{k-1}f(n - \frac{k(3k-1)}{2})$\\
+
+ \hline
+\end{tabularx}
+\end{expandtable}
diff --git a/content/math/tables/platonic.tex b/content/math/tables/platonic.tex
new file mode 100644
index 0000000..f4ee554
--- /dev/null
+++ b/content/math/tables/platonic.tex
@@ -0,0 +1,39 @@
+\begin{tabularx}{\linewidth}{|X|CCCX|}
+ \hline
+ \multicolumn{5}{|c|}{Platonische Körper} \\
+ \hline
+ Übersicht & Seiten & Ecken & Kanten & dual zu \\
+ \hline
+ Tetraeder & 4 & 4 & 6 & Tetraeder \\
+ Würfel/Hexaeder & 6 & 8 & 12 & Oktaeder \\
+ Oktaeder & 8 & 6 & 12 & Würfel/Hexaeder\\
+ Dodekaeder & 12 & 20 & 30 & Ikosaeder \\
+ Ikosaeder & 20 & 12 & 30 & Dodekaeder \\
+ \hline
+ \multicolumn{5}{|c|}{Färbungen mit maximal $n$ Farben (bis auf Isomorphie)} \\
+ \hline
+ \multicolumn{3}{|l}{Ecken vom Oktaeder/Seiten vom Würfel} &
+ \multicolumn{2}{l|}{$(n^6 + 3n^4 + 12n^3 + 8n^2)/24$} \\
+
+ \multicolumn{3}{|l}{Ecken vom Würfel/Seiten vom Oktaeder} &
+ \multicolumn{2}{l|}{$(n^8 + 17n^4 + 6n^2)/24$} \\
+
+ \multicolumn{3}{|l}{Kanten vom Würfel/Oktaeder} &
+ \multicolumn{2}{l|}{$(n^{12} + 6n^7 + 3n^6 + 8n^4 + 6n^3)/24$} \\
+
+ \multicolumn{3}{|l}{Ecken/Seiten vom Tetraeder} &
+ \multicolumn{2}{l|}{$(n^4 + 11n^2)/12$} \\
+
+ \multicolumn{3}{|l}{Kanten vom Tetraeder} &
+ \multicolumn{2}{l|}{$(n^6 + 3n^4 + 8n^2)/12$} \\
+
+ \multicolumn{3}{|l}{Ecken vom Ikosaeder/Seiten vom Dodekaeder} &
+ \multicolumn{2}{l|}{$(n^{12} + 15n^6 + 44n^4)/60$} \\
+
+ \multicolumn{3}{|l}{Ecken vom Dodekaeder/Seiten vom Ikosaeder} &
+ \multicolumn{2}{l|}{$(n^{20} + 15n^{10} + 20n^8 + 24n^4)/60$} \\
+
+ \multicolumn{3}{|l}{Kanten vom Dodekaeder/Ikosaeder (evtl. falsch)} &
+ \multicolumn{2}{l|}{$(n^{30} + 15n^{16} + 20n^{10} + 24n^6)/60$} \\
+ \hline
+\end{tabularx}
diff --git a/content/math/tables/probability.tex b/content/math/tables/probability.tex
new file mode 100644
index 0000000..f265d10
--- /dev/null
+++ b/content/math/tables/probability.tex
@@ -0,0 +1,27 @@
+\begin{tabularx}{\linewidth}{|LICIR|}
+ \hline
+ \multicolumn{3}{|c|}{
+ Wahrscheinlichkeitstheorie ($A,B$ Ereignisse und $X,Y$ Variablen)
+ } \\
+ \hline
+ $\E(X + Y) = \E(X) + \E(Y)$ &
+ $\E(\alpha X) = \alpha \E(X)$ &
+ $X, Y$ unabh. $\Leftrightarrow \E(XY) = \E(X) \cdot \E(Y)$\\
+
+ $\Pr[A \vert B] = \frac{\Pr[A \land B]}{\Pr[B]}$ &
+ $A, B$ unabh. $\Leftrightarrow \Pr[A \land B] = \Pr[A] \cdot \Pr[B]$ &
+ $\Pr[A \lor B] = \Pr[A] + \Pr[B] - \Pr[A \land B]$ \\
+ \hline
+\end{tabularx}
+\vfill
+\begin{tabularx}{\linewidth}{|Xlr|lrX|}
+ \hline
+ \multicolumn{6}{|c|}{\textsc{Bertrand}'s Ballot Theorem (Kandidaten $A$ und $B$, $k \in \mathbb{N}$)} \\
+ \hline
+ & $\#A > k\#B$ & $Pr = \frac{a - kb}{a + b}$ &
+ $\#B - \#A \leq k$ & $Pr = 1 - \frac{a!b!}{(a + k + 1)!(b - k - 1)!}$ & \\
+
+ & $\#A \geq k\#B$ & $Pr = \frac{a + 1 - kb}{a + 1}$ &
+ $\#A \geq \#B + k$ & $Num = \frac{a - k + 1 - b}{a - k + 1} \binom{a + b - k}{b}$ & \\
+ \hline
+\end{tabularx}
diff --git a/content/math/tables/series.tex b/content/math/tables/series.tex
new file mode 100644
index 0000000..3042781
--- /dev/null
+++ b/content/math/tables/series.tex
@@ -0,0 +1,33 @@
+\begin{tabularx}{\linewidth}{|XIXIXIX|}
+ \hline
+ \multicolumn{4}{|c|}{Reihen} \\
+ \hline
+ $\sum\limits_{i = 1}^n i = \frac{n(n+1)}{2}$ &
+ $\sum\limits_{i = 1}^n i^2 = \frac{n(n + 1)(2n + 1)}{6}$ &
+ $\sum\limits_{i = 1}^n i^3 = \frac{n^2 (n + 1)^2}{4}$ &
+ $H_n = \sum\limits_{i = 1}^n \frac{1}{i}$ \\
+ \grayhline
+
+ $\sum\limits_{i = 0}^n c^i = \frac{c^{n + 1} - 1}{c - 1} \quad c \neq 1$ &
+ $\sum\limits_{i = 0}^\infty c^i = \frac{1}{1 - c} \quad \vert c \vert < 1$ &
+ $\sum\limits_{i = 1}^\infty c^i = \frac{c}{1 - c} \quad \vert c \vert < 1$ &
+ $\sum\limits_{i = 0}^\infty ic^i = \frac{c}{(1 - c)^2} \quad \vert c \vert < 1$ \\
+ \grayhline
+
+ \multicolumn{2}{|lI}{
+ $\sum\limits_{i = 0}^n ic^i = \frac{nc^{n + 2} - (n + 1)c^{n + 1} + c}{(c - 1)^2} \quad c \neq 1$
+ } &
+ \multicolumn{2}{l|}{
+ $\sum\limits_{i = 1}^n iH_i = \frac{n(n + 1)}{2}H_n - \frac{n(n - 1)}{4}$
+ } \\
+ \grayhline
+
+ \multicolumn{2}{|lI}{
+ $\sum\limits_{i = 1}^n H_i = (n + 1)H_n - n$
+ } &
+ \multicolumn{2}{l|}{
+ $\sum\limits_{i = 1}^n \binom{i}{m}H_i =
+ \binom{n + 1}{m + 1} \left(H_{n + 1} - \frac{1}{m + 1}\right)$
+ } \\
+ \hline
+\end{tabularx}
diff --git a/content/math/tables/stuff.tex b/content/math/tables/stuff.tex
new file mode 100644
index 0000000..3cf8b4c
--- /dev/null
+++ b/content/math/tables/stuff.tex
@@ -0,0 +1,32 @@
+\begin{tabularx}{\linewidth}{|ll|}
+ \hline
+ \multicolumn{2}{|C|}{Verschiedenes} \\
+ \hline
+ Türme von Hanoi, minimale Schrittzahl: &
+ $T_n = 2^n - 1$ \\
+
+ \#Regionen zwischen $n$ Geraden &
+ $\frac{n\left(n + 1\right)}{2} + 1$ \\
+
+ \#abgeschlossene Regionen zwischen $n$ Geraden &
+ $\frac{n^2 - 3n + 2}{2}$ \\
+
+ \#markierte, gewurzelte Bäume &
+ $n^{n-1}$ \\
+
+ \#markierte, nicht gewurzelte Bäume &
+ $n^{n-2}$ \\
+
+ \#Wälder mit $k$ gewurzelten Bäumen &
+ $\frac{k}{n}\binom{n}{k}n^{n-k}$ \\
+
+ \#Wälder mit $k$ gewurzelten Bäumen mit vorgegebenen Wurzelknoten&
+ $\frac{k}{n}n^{n-k}$ \\
+
+ Derangements &
+ $!n = (n - 1)(!(n - 1) + !(n - 2)) = \left\lfloor\frac{n!}{e} + \frac{1}{2}\right\rfloor$ \\
+ &
+ $\lim\limits_{n \to \infty} \frac{!n}{n!} = \frac{1}{e}$ \\
+ \hline
+\end{tabularx}
+
diff --git a/content/math/tables/twelvefold.tex b/content/math/tables/twelvefold.tex
new file mode 100644
index 0000000..18d3955
--- /dev/null
+++ b/content/math/tables/twelvefold.tex
@@ -0,0 +1,32 @@
+\begin{expandtable}
+\begin{tabularx}{\linewidth}{|C|CICICIC|}
+ \hline
+ Bälle & identisch & verschieden & identisch & verschieden \\
+ Boxen & identisch & identisch & verschieden & verschieden \\
+ \hline
+ -- &
+ $p_k(n + k)$ &
+ $\sum\limits_{i = 0}^k \stirlingII{n}{i}$ &
+ $\binom{n + k - 1}{k - 1}$ &
+ $k^n$ \\
+ \grayhline
+
+ \makecell{Bälle pro\\Box $\geq 1$} &
+ $p_k(n)$ &
+ $\stirlingII{n}{k}$ &
+ $\binom{n - 1}{k - 1}$ &
+ $k! \stirlingII{n}{k}$ \\
+ \grayhline
+
+ \makecell{Bälle pro\\Box $\leq 1$} &
+ $[n \leq k]$ &
+ $[n \leq k]$ &
+ $\binom{k}{n}$ &
+ $n! \binom{k}{n}$ \\
+ \hline
+ \multicolumn{5}{|l|}{
+ $[\text{Bedingung}]$: \code{return Bedingung ? 1 : 0;}
+ } \\
+ \hline
+\end{tabularx}
+\end{expandtable}
diff --git a/content/math/transforms/andTransform.cpp b/content/math/transforms/andTransform.cpp
new file mode 100644
index 0000000..1fd9f5c
--- /dev/null
+++ b/content/math/transforms/andTransform.cpp
@@ -0,0 +1,8 @@
+void fft(vector<ll>& a, bool inv = false) {
+ int n = sz(a);
+ for (int s = 1; s < n; s *= 2) {
+ for (int i = 0; i < n; i += 2 * s) {
+ for (int j = i; j < i + s; j++) {
+ ll& u = a[j], &v = a[j + s];
+ tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v);
+}}}}
diff --git a/content/math/transforms/bitwiseTransforms.cpp b/content/math/transforms/bitwiseTransforms.cpp
new file mode 100644
index 0000000..28561da
--- /dev/null
+++ b/content/math/transforms/bitwiseTransforms.cpp
@@ -0,0 +1,12 @@
+void bitwiseConv(vector<ll>& a, bool inv = false) {
+ int n = sz(a);
+ for (int s = 1; s < n; s *= 2) {
+ for (int i = 0; i < n; i += 2 * s) {
+ for (int j = i; j < i + s; j++) {
+ ll& u = a[j], &v = a[j + s];
+ tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); // AND
+ //tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u); //OR
+ //tie(u, v) = pair(u + v, u - v); // XOR
+ }}}
+ //if (inv) for (ll& x : a) x /= n; // XOR (careful with MOD)
+}
diff --git a/content/math/transforms/fft.cpp b/content/math/transforms/fft.cpp
new file mode 100644
index 0000000..2bd95b2
--- /dev/null
+++ b/content/math/transforms/fft.cpp
@@ -0,0 +1,23 @@
+using cplx = complex<double>;
+
+void fft(vector<cplx>& a, bool inv = false) {
+ int n = sz(a);
+ for (int i = 0, j = 1; j < n - 1; ++j) {
+ for (int k = n >> 1; k > (i ^= k); k >>= 1);
+ if (j < i) swap(a[i], a[j]);
+ }
+ static vector<cplx> ws(2, 1);
+ for (static int k = 2; k < n; k *= 2) {
+ ws.resize(n);
+ cplx w = polar(1.0, acos(-1.0) / k);
+ for (int i=k; i<2*k; i++) ws[i] = ws[i/2] * (i % 2 ? w : 1);
+ }
+ for (int s = 1; s < n; s *= 2) {
+ for (int j = 0; j < n; j += 2 * s) {
+ for (int k = 0; k < s; k++) {
+ cplx u = a[j + k], t = a[j + s + k];
+ t *= (inv ? conj(ws[s + k]) : ws[s + k]);
+ a[j + k] = u + t;
+ a[j + s + k] = u - t;
+ if (inv) a[j + k] /= 2, a[j + s + k] /= 2;
+}}}}
diff --git a/content/math/transforms/fftMul.cpp b/content/math/transforms/fftMul.cpp
new file mode 100644
index 0000000..660ed79
--- /dev/null
+++ b/content/math/transforms/fftMul.cpp
@@ -0,0 +1,15 @@
+vector<cplx> mul(vector<ll>& a, vector<ll>& b) {
+ int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1);
+ vector<cplx> c(all(a)), d(n);
+ c.resize(n);
+ for (int i = 0; i < sz(b); i++) c[i] = {real(c[i]), b[i]};
+ fft(c);
+ for (int i = 0; i < n; i++) {
+ int j = (n - i) & (n - 1);
+ cplx x = (c[i] + conj(c[j])) / cplx{2, 0}; //fft(a)[i];
+ cplx y = (c[i] - conj(c[j])) / cplx{0, 2}; //fft(b)[i];
+ d[i] = x * y;
+ }
+ fft(d, true);
+ return d;
+}
diff --git a/content/math/transforms/multiplyBitwise.cpp b/content/math/transforms/multiplyBitwise.cpp
new file mode 100644
index 0000000..f7cf169
--- /dev/null
+++ b/content/math/transforms/multiplyBitwise.cpp
@@ -0,0 +1,8 @@
+vector<ll> mul(vector<ll> a, vector<ll> b) {
+ int n = 1 << (__lg(2 * max(sz(a), sz(b)) - 1));
+ a.resize(n), b.resize(n);
+ bitwiseConv(a), bitwiseConv(b);
+ for (int i=0; i<n; i++) a[i] *= b[i]; // MOD?
+ bitwiseConv(a, true);
+ return a;
+}
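+// Example (AND variant active in bitwiseConv):
+//   mul({1, 2}, {3, 4}) == {13, 8}
+//   c[k] = sum of a[i]*b[j] over i&j == k: c[0]=1*3+1*4+2*3, c[1]=2*4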
diff --git a/content/math/transforms/multiplyFFT.cpp b/content/math/transforms/multiplyFFT.cpp
new file mode 100644
index 0000000..0022d1f
--- /dev/null
+++ b/content/math/transforms/multiplyFFT.cpp
@@ -0,0 +1,12 @@
+vector<ll> mul(vector<ll>& a, vector<ll>& b) {
+ int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1);
+ vector<cplx> a2(all(a)), b2(all(b));
+ a2.resize(n), b2.resize(n);
+ fft(a2), fft(b2);
+ for (int i=0; i<n; i++) a2[i] *= b2[i];
+ fft(a2, true);
+
+ vector<ll> ans(n);
+ for (int i=0; i<n; i++) ans[i] = llround(a2[i].real());
+ return ans;
+}
diff --git a/content/math/transforms/multiplyNTT.cpp b/content/math/transforms/multiplyNTT.cpp
new file mode 100644
index 0000000..806d124
--- /dev/null
+++ b/content/math/transforms/multiplyNTT.cpp
@@ -0,0 +1,8 @@
+vector<ll> mul(vector<ll> a, vector<ll> b) {
+ int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1);
+ a.resize(n), b.resize(n);
+ ntt(a), ntt(b);
+ for (int i=0; i<n; i++) a[i] = a[i] * b[i] % mod;
+ ntt(a, true);
+ return a;
+}
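+// Example (assumes ntt() from ntt.cpp and powMod()):
+//   mul({1, 2}, {3, 4}) == {3, 10, 8, 0},
+//   i.e. (1+2x)(3+4x) = 3+10x+8x^2 with coefficients mod 998244353.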
diff --git a/content/math/transforms/ntt.cpp b/content/math/transforms/ntt.cpp
new file mode 100644
index 0000000..ca605d3
--- /dev/null
+++ b/content/math/transforms/ntt.cpp
@@ -0,0 +1,23 @@
+constexpr ll mod = 998244353, root = 3;
+
+void ntt(vector<ll>& a, bool inv = false) {
+ int n = sz(a);
+ auto b = a;
+ ll r = inv ? powMod(root, mod - 2, mod) : root;
+
+ for (int s = n / 2; s > 0; s /= 2) {
+ ll ws = powMod(r, (mod - 1) / (n / s), mod), w = 1;
+ for (int j = 0; j < n / 2; j += s) {
+ for (int k = j; k < j + s; k++) {
+ ll u = a[j + k], t = a[j + s + k] * w % mod;
+ b[k] = (u + t) % mod;
+ b[n/2 + k] = (u - t + mod) % mod;
+ }
+ w = w * ws % mod;
+ }
+ swap(a, b);
+ }
+ if (inv) {
+ ll div = powMod(n, mod - 2, mod);
+ for (auto& x : a) x = x * div % mod;
+}}
diff --git a/content/math/transforms/orTransform.cpp b/content/math/transforms/orTransform.cpp
new file mode 100644
index 0000000..eb1da44
--- /dev/null
+++ b/content/math/transforms/orTransform.cpp
@@ -0,0 +1,8 @@
+void fft(vector<ll>& a, bool inv = false) {
+ int n = sz(a);
+ for (int s = 1; s < n; s *= 2) {
+ for (int i = 0; i < n; i += 2 * s) {
+ for (int j = i; j < i + s; j++) {
+ ll& u = a[j], &v = a[j + s];
+ tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u);
+}}}}
diff --git a/content/math/transforms/seriesOperations.cpp b/content/math/transforms/seriesOperations.cpp
new file mode 100644
index 0000000..4743674
--- /dev/null
+++ b/content/math/transforms/seriesOperations.cpp
@@ -0,0 +1,56 @@
+vector<ll> poly_inv(const vector<ll>& a, int n) {
+ vector<ll> q = {powMod(a[0], mod-2, mod)};
+ for (int len = 1; len < n; len *= 2){
+ vector<ll> a2 = a, q2 = q;
+ a2.resize(2*len), q2.resize(2*len);
+ ntt(q2);
+ for (int j : {0, 1}) {
+ ntt(a2);
+ for (int i = 0; i < 2*len; i++) a2[i] = a2[i]*q2[i] % mod;
+ ntt(a2, true);
+ for (int i = 0; i < len; i++) a2[i] = 0;
+ }
+ for (int i = len; i < min(n, 2*len); i++) {
+ q.push_back((mod - a2[i]) % mod);
+ }}
+ return q;
+}
+
+vector<ll> poly_deriv(vector<ll> a) {
+ for (int i = 1; i < sz(a); i++)
+ a[i-1] = a[i] * i % mod;
+ a.pop_back();
+ return a;
+}
+
+vector<ll> poly_integr(vector<ll> a) {
+ if (a.empty()) return {0};
+ a.push_back(a.back() * powMod(sz(a), mod-2, mod) % mod);
+ for (int i = sz(a)-2; i > 0; i--)
+ a[i] = a[i-1] * powMod(i, mod-2, mod) % mod;
+ a[0] = 0;
+ return a;
+}
+
+vector<ll> poly_log(vector<ll> a, int n) {
+ a = mul(poly_deriv(a), poly_inv(a, n));
+ a.resize(n-1);
+ a = poly_integr(a);
+ return a;
+}
+
+vector<ll> poly_exp(vector<ll> a, int n) {
+ vector<ll> q = {1};
+ for (int len = 1; len < n; len *= 2) {
+ vector<ll> p = poly_log(q, 2*len);
+ for (int i = 0; i < 2*len; i++)
+ p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod;
+ vector<ll> q2 = q;
+ q2.resize(2*len);
+ ntt(p), ntt(q2);
+ for (int i = 0; i < 2*len; i++) p[i] = p[i] * q2[i] % mod;
+ ntt(p, true);
+ for (int i = len; i < min(n, 2*len); i++) q.push_back(p[i]);
+ }
+ return q;
+}
diff --git a/content/math/transforms/xorTransform.cpp b/content/math/transforms/xorTransform.cpp
new file mode 100644
index 0000000..f9d1d82
--- /dev/null
+++ b/content/math/transforms/xorTransform.cpp
@@ -0,0 +1,10 @@
+void fft(vector<ll>& a, bool inv = false) {
+ int n = sz(a);
+ for (int s = 1; s < n; s *= 2) {
+ for (int i = 0; i < n; i += 2 * s) {
+ for (int j = i; j < i + s; j++) {
+ ll& u = a[j], &v = a[j + s];
+ tie(u, v) = pair(u + v, u - v);
+ }}}
+ if (inv) for (ll& x : a) x /= n;
+}
diff --git a/content/other/bitOps.cpp b/content/other/bitOps.cpp
new file mode 100644
index 0000000..8079305
--- /dev/null
+++ b/content/other/bitOps.cpp
@@ -0,0 +1,18 @@
+// Iterates over all subsets of a bitmask
+// (except the empty set).
+for (int subset = bitmask; subset > 0;
+ subset = (subset - 1) & bitmask)
+
+// Counts the number of set bits.
+int numberOfSetBits(int i) {
+ i = i - ((i >> 1) & 0x5555'5555);
+ i = (i & 0x3333'3333) + ((i >> 2) & 0x3333'3333);
+ return (((i + (i >> 4)) & 0x0F0F'0F0F) * 0x0101'0101) >> 24;
+}
+
+// Next bit permutation with the same number of set bits
+// (e.g. 00111 => 01011 => 01101 => ...)
+ll nextPerm(ll v) {
+ ll t = v | (v - 1);
+ return (t+1) | (((~t & -~t) - 1) >> (__builtin_ctzll(v) + 1));
+}
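+
+// Usage sketch for the subset loop above:
+//   int bitmask = 0b1011;
+//   for (int subset = bitmask; subset > 0;
+//        subset = (subset - 1) & bitmask) {}
+//   // visits 1011, 1010, 1001, 1000, 0011, 0010, 0001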
diff --git a/content/other/compiletime.cpp b/content/other/compiletime.cpp
new file mode 100644
index 0000000..b71f83b
--- /dev/null
+++ b/content/other/compiletime.cpp
@@ -0,0 +1,7 @@
+template<int N>
+struct Table {
+ int data[N];
+ constexpr Table() : data {} {
+ for (int i = 0; i < N; i++) data[i] = i;
+}};
+constexpr Table<100'000> precalculated;
diff --git a/content/other/divideAndConquer.cpp b/content/other/divideAndConquer.cpp
new file mode 100644
index 0000000..830dc7f
--- /dev/null
+++ b/content/other/divideAndConquer.cpp
@@ -0,0 +1,27 @@
+vector<vector<ll>> dp;
+vector<vector<ll>> C;
+
+void rec(int i, int j0, int j1, int m0, int m1) {
+ if (j1 < j0) return;
+ int jmid = (j0 + j1) / 2;
+
+ dp[i][jmid] = inf;
+ int bestk = m0;
+ for (int k = m0; k < min(jmid, m1 + 1); ++k) {
+ if (dp[i - 1][k] + C[k + 1][jmid] < dp[i][jmid]) {
+ dp[i][jmid] = dp[i - 1][k] + C[k + 1][jmid];
+ bestk = k;
+ }}
+
+ rec(i, j0, jmid - 1, m0, bestk);
+ rec(i, jmid + 1, j1, bestk, m1);
+}
+
+ll calc(int n, int m) {
+ dp = vector<vector<ll>>(m, vector<ll>(n, inf));
+ for (int i = 0; i < n; i++) dp[0][i] = C[0][i];
+ for (int i = 1; i < m; i++) {
+ rec(i, 0, n - 1, 0, n - 1);
+ }
+ return dp[m - 1][n - 1];
+}
diff --git a/content/other/fastIO.cpp b/content/other/fastIO.cpp
new file mode 100644
index 0000000..9badcc7
--- /dev/null
+++ b/content/other/fastIO.cpp
@@ -0,0 +1,24 @@
+void fastscan(int& number) {
+ bool negative = false;
+ int c;
+ number = 0;
+ c = getchar();
+ while(c != '-' && (c < '0' || c > '9')) c = getchar();
+ if (c == '-') negative = true, c = getchar();
+ for (; c >= '0' && c <= '9'; c = getchar()) number = number * 10 + c - '0';
+ if (negative) number *= -1;
+}
+
+void printPositive(int n) {
+ if (n == 0) return;
+ printPositive(n / 10);
+ putchar(n % 10 + '0');
+}
+
+void fastprint(int n) {
+ if(n == 0) {putchar('0'); return;}
+ if (n < 0) {
+ putchar('-');
+ printPositive(-n);
+ } else printPositive(n);
+}
diff --git a/content/other/josephus2.cpp b/content/other/josephus2.cpp
new file mode 100644
index 0000000..5086e13
--- /dev/null
+++ b/content/other/josephus2.cpp
@@ -0,0 +1,8 @@
+int rotateLeft(int n) { // The last survivor, 1-based.
+ for (int i = 31; i >= 0; i--) {
+ if (n & (1 << i)) {
+ n &= ~(1 << i);
+ break;
+ }}
+ n <<= 1; n++; return n;
+}
diff --git a/content/other/josephusK.cpp b/content/other/josephusK.cpp
new file mode 100644
index 0000000..5025f89
--- /dev/null
+++ b/content/other/josephusK.cpp
@@ -0,0 +1,5 @@
+// The last survivor, 0-based.
+int josephus(int n, int k) {
+ if (n == 1) return 0;
+ return (josephus(n - 1, k) + k) % n;
+}
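+// Example: josephus(5, 2) == 2, i.e. person 3 survives (1-based).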
diff --git a/content/other/knuth.cpp b/content/other/knuth.cpp
new file mode 100644
index 0000000..1d513c8
--- /dev/null
+++ b/content/other/knuth.cpp
@@ -0,0 +1,15 @@
+ll calc(int n, int m, const vector<vector<ll>>& C) {
+ vector<vector<ll>> dp(m, vector<ll>(n, inf));
+ vector<vector<int>> opt(m, vector<int>(n + 1, n - 1));
+
+ for (int i = 0; i < n; i++) dp[0][i] = C[0][i];
+ for (int i = 1; i < m; i++) {
+ for (int j = n - 1; j >= 0; --j) {
+ opt[i][j] = i == 1 ? 0 : opt[i - 1][j];
+ for (int k = opt[i][j]; k <= min(opt[i][j+1], j-1); k++) {
+ if (dp[i][j] <= dp[i - 1][k] + C[k + 1][j]) continue;
+ dp[i][j] = dp[i - 1][k] + C[k + 1][j];
+ opt[i][j] = k;
+ }}}
+ return dp[m - 1][n - 1];
+}
diff --git a/content/other/other.tex b/content/other/other.tex
new file mode 100644
index 0000000..b47893f
--- /dev/null
+++ b/content/other/other.tex
@@ -0,0 +1,312 @@
+\section{Sonstiges}
+
+\begin{algorithm}{Compiletime}
+ \begin{itemize}
+ \item Überprüfen, ob Compilezeit-Berechnungen erlaubt sind!
+ \item braucht \code{c++14} oder höher!
+ \end{itemize}
+ \sourcecode{other/compiletime.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Timed}
+ Kann benutzt werden um randomisierte Algorithmen so lange wie möglich laufen zu lassen.
+ \sourcecode{other/timed.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Bit Operations}
+ \begin{expandtable}
+ \begin{tabularx}{\linewidth}{|Ll|}
+ \hline
+ Bit an Position j lesen & \code{(x & (1 << j)) != 0} \\
+ Bit an Position j setzen & \code{x |= (1 << j)} \\
+ Bit an Position j löschen & \code{x &= ~(1 << j)} \\
+ Bit an Position j flippen & \code{x ^= (1 << j)} \\
+ Anzahl an führenden Nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\
+ Anzahl an Nullen am Ende ($x \neq 0$) & \code{__builtin_ctzll(x)} \\
+ Anzahl an \code{1} bits & \code{__builtin_popcountll(x)} \\
+ $i$-te Zahl eines Graycodes & \code{i ^ (i >> 1)} \\
+ \hline
+ \end{tabularx}\\
+ \end{expandtable}
+ \sourcecode{other/bitOps.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Overflow-sichere arithmetische Operationen}
+ Gibt zurück, ob es einen Overflow gab. Wenn nicht, enthält \code{c} das Ergebnis.
+ \begin{expandtable}
+ \begin{tabularx}{\linewidth}{|lR|}
+ \hline
+ Addition & \code{__builtin_saddll_overflow(a, b, &c)} \\
+ Subtraktion & \code{__builtin_ssubll_overflow(a, b, &c)} \\
+ Multiplikation & \code{__builtin_smulll_overflow(a, b, &c)} \\
+ \hline
+ \end{tabularx}
+ \end{expandtable}
+\end{algorithm}
+
+\begin{algorithm}{Pragmas}
+ \sourcecode{other/pragmas.cpp}
+\end{algorithm}
+
+\begin{algorithm}{DP Optimizations}
+ Aufgabe: Partitioniere Array in genau $m$ zusammenhängende Teile mit minimalen Kosten:
+ $dp[i][j] = \min_{k<j}\{dp[i-1][k-1]+C[k][j]\}$. Es sei $A[i][j]$ das \emph{minimale} optimale
+ $k$ bei der Berechnung von $dp[i][j]$.
+
+ \paragraph{\textsc{Knuth}-Optimization} Vorbedingung: $A[i - 1][j] \leq A[i][j] \leq A[i][j + 1]$
+
+ \method{calc}{berechnet das DP}{n^2}
+ \sourcecode{other/knuth.cpp}
+
+ \paragraph{Divide and Conquer}
+ Vorbedingung: $A[i][j - 1] \leq A[i][j]$.
+
+ \method{calc}{berechnet das DP}{m\*n\*\log(n)}
+ \sourcecode{other/divideAndConquer.cpp}
+
+ \paragraph{Quadrangle inequality} Die Bedingung $\forall a\leq b\leq c\leq d:
+ C[a][d] + C[b][c] \geq C[a][c] + C[b][d]$ ist hinreichend für beide Optimierungen.
+
+ \paragraph{Sum over Subsets DP} $\text{res}[\text{mask}]=\sum_{i\subseteq\text{mask}}\text{in}[i]$.
+ Für Summe über Supersets \code{res} einmal vorher und einmal nachher reversen.
+ \sourcecode{other/sos.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Parallel Binary Search}
+ \sourcecode{other/pbs.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Josephus-Problem}
+ $n$ Personen im Kreis, jeder $k$-te wird erschossen.
+ \begin{description}
+ \item[Spezialfall $\boldsymbol{k=2}$:] Betrachte $n$ Binär.
+ Für $n = 1b_1b_2b_3..b_n$ ist $b_1b_2b_3..b_n1$ die Position des letzten Überlebenden.
+ (Rotiere $n$ um eine Stelle nach links)
+ \end{description}
+ \sourcecode{other/josephus2.cpp}
+
+ \begin{description}
+ \item[Allgemein:] Sei $F(n,k)$ die Position des letzten Überlebenden.
+ Nummeriere die Personen mit $0, 1, \ldots, n-1$.
+ Nach Erschießen der $k$-ten Person, hat der Kreis noch Größe $n-1$ und die Position des Überlebenden ist jetzt $F(n-1,k)$.
+ Also: $F(n,k) = (F(n-1,k)+k)\%n$. Basisfall: $F(1,k) = 0$.
+ \end{description}
+ \sourcecode{other/josephusK.cpp}
+ \textbf{Beachte bei der Ausgabe, dass die Personen im ersten Fall von $\boldsymbol{1, \ldots, n}$ nummeriert sind, im zweiten Fall von $\boldsymbol{0, \ldots, n-1}$!}
+\end{algorithm}
+
+\begin{algorithm}[optional]{Zeileneingabe}
+ \sourcecode{other/split.cpp}
+\end{algorithm}
+
+\begin{algorithm}[optional]{Fast IO}
+ \sourcecode{other/fastIO.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Sonstiges}
+ \sourcecode{other/stuff.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Stress Test}
+ \sourcecode{other/stress.sh}
+\end{algorithm}
+
+\clearpage
+\subsection{Gemischtes}
+\begin{itemize}
+ \item \textbf{(Minimum) Flow mit Demand \textit{d}:}
+ Erstelle neue Quelle $s'$ und Senke $t'$ und setzte die folgenden Kapazitäten:
+ \begin{align*}
+ c'(s',v)&=\sum_{u\in{}V}d(u,v)&c'(v,t')&=\sum_{u\in{}V}d(v,u)\\[-0.5ex]
+ c'(u,v)&=c(u,v)-d(u,v)&c'(t,s)&=x
+ \end{align*}
+ Löse Fluss auf $G'$ mit \textsc{Dinic's Algorithmus}, wenn alle Kanten von $s'$ saturiert sind ist der Fluss in $G$ gültig. $x$ beschränkt den Fluss in $G$ (Binary-Search für minflow, $\infty$ sonst).
+ \item \textbf{\textsc{Johnsons} Reweighting Algorithmus:}
+ Initialisiere alle Entfernungen mit \texttt{d[i] = 0}. Berechne mit \textsc{Bellman-Ford} kürzeste Entfernungen.
+ Falls es einen negativen Zyklus gibt, abbrechen.
+ Sonst ändere die Gewichte von allen Kanten \texttt{(u,v)} im ursprünglichen Graphen zu \texttt{d[u]+w[u,v]-d[v]}.
+ Dann sind alle Kantengewichte nichtnegativ, \textsc{Dijkstra} kann angewendet werden.
+
+ \item \textbf{System von Differenzbeschränkungen:}
+ Ändere alle Bedingungen in die Form $a-b \leq c$.
+ Für jede Bedingung füge eine Kante \texttt{(b,a)} mit Gewicht \texttt{c} ein.
+ Füge Quelle \texttt{s} hinzu, mit Kanten zu allen Knoten mit Gewicht 0.
+ Nutze \textsc{Bellman-Ford}, um die kürzesten Pfade von \texttt{s} aus zu finden.
+ \texttt{d[v]} ist mögliche Lösung für \texttt{v}.
+
+ \item \textbf{Min-Weight-Vertex-Cover im Bipartiten Graph:}
+ Partitioniere in \texttt{A, B} und füge Kanten \texttt{s}\,$\rightarrow$\,\texttt{A} mit Gewicht \texttt{w(A)} und Kanten \texttt{B}\,$\rightarrow$\,\texttt{t} mit Gewicht \texttt{w(B)} hinzu.
+ Füge Kanten mit Kapazität $\infty$ von \texttt{A} nach \texttt{B} hinzu, wo im originalen Graphen Kanten waren.
+ Max-Flow ist die Lösung.\newline
+ Im Residualgraphen:
+ \begin{itemize}
+ \item Das Vertex-Cover sind die Knoten inzident zu den Brücken. \emph{oder}
+ \item Die Knoten in \texttt{A}, die \emph{nicht} von \texttt{s} erreichbar sind, und die Knoten in \texttt{B}, die von \texttt{s} erreichbar sind.
+ \end{itemize}
+
+ \item \textbf{Allgemeiner Graph:}
+ Das Komplement eines Vertex-Cover ist ein Independent Set.
+ $\Rightarrow$ Max Weight Independent Set ist Komplement von Min Weight Vertex Cover.
+
+ \item \textbf{Bipartiter Graph:}
+ Min Vertex Cover (kleinste Menge Knoten, die alle Kanten berühren) = Max Matching.
+ Richte Kanten im Matching von $B$ nach $A$ und sonst von $A$ nach $B$, markiere alle Knoten, die von einem ungematchten Knoten in $A$ erreichbar sind. Das Vertex Cover sind die markierten Knoten aus $B$ und die unmarkierten Knoten aus $A$.
+
+ \item \textbf{Bipartites Matching mit Gewichten auf linken Knoten:}
+ Minimiere Matchinggewicht.
+ Lösung: Sortiere Knoten links aufsteigend nach Gewicht, danach nutze normalen Algorithmus (\textsc{Kuhn}, Seite \pageref{kuhn})
+
+ \item \textbf{Satz von \textsc{Pick}:}
+ Sei $A$ der Flächeninhalt eines einfachen Gitterpolygons, $I$ die Anzahl der Gitterpunkte im Inneren und $R$ die Anzahl der Gitterpunkte auf dem Rand.
+ Es gilt:\vspace*{-\baselineskip}
+ \[
+ A = I + \frac{R}{2} - 1
+ \]
+
+ \item \textbf{Lemma von \textsc{Burnside}:}
+ Sei $G$ eine endliche Gruppe, die auf der Menge $X$ operiert.
+ Für jedes $g \in G$ sei $X^g$ die Menge der Fixpunkte bei Operation durch $g$, also $X^g = \{x \in X \mid g \bullet x = x\}$.
+ Dann gilt für die Anzahl der Bahnen $[X/G]$ der Operation:
+ \[
+ [X/G] = \frac{1}{\vert G \vert} \sum_{g \in G} \vert X^g \vert
+ \]
+
+ \item \textbf{\textsc{Polya} Counting:}
+ Sei $\pi$ eine Permutation der Menge $X$.
+ Die Elemente von $X$ können mit einer von $m$ Farben gefärbt werden.
+ Die Anzahl der Färbungen, die Fixpunkte von $\pi$ sind, ist $m^{\#(\pi)}$, wobei $\#(\pi)$ die Anzahl der Zyklen von $\pi$ ist.
+ Die Anzahl der Färbungen von Objekten einer Menge $X$ mit $m$ Farben unter einer Symmetriegruppe $G$ ist gegeben durch:
+ \[
+ [X/G] = \frac{1}{\vert G \vert} \sum_{g \in G} m^{\#(g)}
+ \]
+
+ \item \textbf{Verteilung von Primzahlen:}
+ Für alle $n \in \mathbb{N}$ gilt: Es existiert eine Primzahl $p$ mit $n \leq p \leq 2n$.
+
+ \item \textbf{Satz von \textsc{Kirchhoff}:}
+ Sei $G$ ein zusammenhängender, ungerichteter Graph evtl. mit Mehrfachkanten.
+ Sei $A$ die Adjazenzmatrix von $G$.
+ Dabei ist $a_{ij}$ die Anzahl der Kanten zwischen Knoten $i$ und $j$.
+ Sei $B$ eine Diagonalmatrix, $b_{ii}$ sei der Grad von Knoten $i$.
+ Definiere $R = B - A$.
+ Alle Kofaktoren von $R$ sind gleich und entsprechen der Anzahl der Spannbäume von $G$.
+ \newline
+ Entferne letzte Zeile und Spalte und berechne Betrag der Determinante.
+
+ \item \textbf{\textsc{Dilworths}-Theorem:}
+ Sei $S$ eine Menge und $\leq$ eine partielle Ordnung ($S$ ist ein Poset).
+ Eine \emph{Kette} ist eine Teilmenge $\{x_1,\ldots,x_n\}$ mit $x_1 \leq \ldots \leq x_n$.
+ Eine \emph{Partition} ist eine Menge von Ketten, sodass jedes $s \in S$ in genau einer Kette ist.
+ Eine \emph{Antikette} ist eine Menge von Elementen, die paarweise nicht vergleichbar sind.
+ \newline
+ Es gilt: Die Größe der längsten Antikette gleicht der Größe der kleinsten Partition.
+ $\Rightarrow$ \emph{Weite} des Poset.
+ \newline
+ Berechnung: Maximales Matching in bipartitem Graphen.
+ Dupliziere jedes $s \in S$ in $u_s$ und $v_s$.
+ Falls $x \leq y$, füge Kante $u_x \to v_y$ hinzu.
+ Wenn Matching zu langsam ist, versuche Struktur des Posets auszunutzen und evtl. anders eine maximale Antikette zu finden.
+
+ \item \textbf{\textsc{Turan}'s-Theorem:}
+ Die maximale Anzahl an Kanten in einem Graphen mit $n$ Knoten, der keine Clique der Größe $x+1$ enthält, ist:
+ \begin{align*}
+ ext(n, K_{x+1}) &= \binom{n}{2} - \left[\left(x - (n \bmod x)\right) \cdot \binom{\floor{\frac{n}{x}}}{2} + \left(n\bmod x\right) \cdot \binom{\ceil{\frac{n}{x}}}{2}\right]
+ \end{align*}
+
+ \item \textbf{\textsc{Euler}'s-Polyedersatz:}
+ In planaren Graphen gilt $n-m+f-c=1$.
+
+ \item \textbf{\textsc{Pythagoreische Tripel}:}
+ Sei $m>n>0,~k>0$ und $m\not\equiv n \bmod 2$ dann beschreibt diese Formel alle Pythagoreischen Tripel eindeutig:
+ \[k~\cdot~\Big(~a=m^2-n^2,\quad b=2mn,\quad c=m^2+n^2~\Big)\]
+
+ \item \textbf{Centroids of a Tree:}
+ Ein \emph{Centroid} ist ein Knoten, der einen Baum in Komponenten der maximalen Größe $\frac{\abs{V}}{2}$ teilt.
+ Es kann $2$ Centroids geben!
+
+ \item \textbf{Centroid Decomposition:}
+ Wähle zufälligen Knoten und mache DFS.
+ Verschiebe ausgewählten Knoten in Richtung des tiefsten Teilbaums, bis Centroid gefunden. Entferne Knoten, mache rekursiv in Teilbäumen weiter. Laufzeit:~\runtime{\abs{V} \cdot \log(\abs{V})}.
+ \item \textbf{Gregorian Calendar:} Der Anfangstag des Jahres ist alle $400$ Jahre gleich.
+
+ \item \textbf{Pivotsuche und Rekursion auf linkem und rechtem Teilarray:}
+ Suche gleichzeitig von links und rechts nach Pivot, um Worst Case von
+ $\runtime{n^2}$ zu $\runtime{n\log n}$ zu verbessern.
+
+ \item \textbf{\textsc{Mo}'s Algorithm:}
+ SQRT-Decomposition auf $n$ Intervall Queries $[l,r]$.
+ Gruppiere Queries in $\sqrt{n}$ Blöcke nach linker Grenze $l$.
+ Sortiere nach Block und bei gleichem Block nach rechter Grenze $r$.
+ Beantworte Queries offline durch schrittweise Vergrößern/Verkleinern des aktuellen Intervalls.
+ Laufzeit:~\runtime{n\cdot\sqrt{n}}.
+ (Anzahl der Blöcke als Konstante in Code schreiben.)
+
+ \item \textbf{SQRT Techniques:}
+ \begin{itemize}
+ \item Aufteilen in \emph{leichte} (wert $\leq\sqrt{x}$) und \emph{schwere} (höchsten $\sqrt{x}$ viele) Objekte.
+ \item Datenstruktur in Blöcke fester Größe (z.b. 256 oder 512) aufteilen.
+ \item Datenstruktur nach fester Anzahl Updates komplett neu bauen.
+ \item Wenn die Summe über $x_i$ durch $X$ beschränkt ist, dann gibt es nur $\sqrt{2X}$ verschiedene Werte von $x_i$ (z.b. Längen von Strings).
+ \item Wenn $w\cdot h$ durch $X$ beschränkt ist, dann ist $\min(w,h)\leq\sqrt{X}$.
+ \end{itemize}
+
+ \item \textbf{Partition:}
+ Gegeben Gewichte $w_0+w_1+\cdots+w_k=W$, existiert eine Teilmenge mit Gewicht $x$?
+ Drei gleiche Gewichte $w$ können zu $w$ und $2w$ kombiniert werden ohne die Lösung zu ändern $\Rightarrow$ nur $2\sqrt{W}$ unterschiedliche Gewichte.
+ Mit bitsets daher selbst für $10^5$ lösbar.
+\end{itemize}
+
+\subsection{Tipps \& Tricks}
+
+\begin{itemize}
+ \item \textbf{Run Time Error:}
+ \begin{itemize}
+ \item Stack Overflow? Evtl. rekursive Tiefensuche auf langem Pfad?
+ \item Array-Grenzen überprüfen. Indizierung bei $0$ oder bei $1$ beginnen?
+ \item Abbruchbedingung bei Rekursion?
+ \item Evtl. Memory Limit Exceeded? Mit \code{/usr/bin/time -v} erhält man den maximalen Speicherverbrauch bei der Ausführung (Maximum resident set size).
+ \end{itemize}
+
+ \item \textbf{Strings:}
+ \begin{itemize}
+ \item Soll \codeSafe{"aa"} kleiner als \codeSafe{"z"} sein oder nicht?
+ \item bit \code{0x20} beeinflusst Groß-/Kleinschreibung.
+ \end{itemize}
+
+ \item \textbf{Zeilenbasierte Eingabe}:
+ \begin{itemize}
+ \item \code{getline(cin, str)} liest Zeile ein.
+ \item Wenn vorher \code{cin >> ...} benutzt, lese letztes \code{\\n} mit \code{getline(cin, x)}.
+ \end{itemize}
+
+ \item \textbf{Gleitkommazahlen:}
+ \begin{itemize}
+ \item \code{NaN}? Evtl. ungültige Werte für mathematische Funktionen, z.B. \mbox{\code{acos(1.00000000000001)}}?
+ \item Falsches Runden bei negativen Zahlen? Abschneiden $\neq$ Abrunden!
+ \item genügend Präzision oder Output in wissenschaftlicher Notation (\code{1e-25})?
+ \item Kann \code{-0.000} ausgegeben werden?
+ \end{itemize}
+
+ \item \textbf{Wrong Answer:}
+ \begin{itemize}
+ \item Lies Aufgabe erneut. Sorgfältig!
+ \item Mehrere Testfälle in einer Datei? Probiere gleichen Testcase mehrfach hintereinander.
+ \item Integer Overflow? Teste maximale Eingabegrößen und mache Überschlagsrechnung.
+ \item Ausgabeformat im 'unmöglich'-Fall überprüfen.
+ \item Ist das Ergebnis modulo einem Wert?
+ \item Integer Division rundet zur $0$ $\neq$ abrunden.
+ \item Eingabegrößen überprüfen. Sonderfälle ausprobieren.
+ \begin{itemize}
+ \item $n = 0$, $n = -1$, $n = 1$, $n = 2^{31}-1$, $n = -2^{31}$
+ \item $n$ gerade/ungerade
+ \item Graph ist leer/enthält nur einen Knoten.
+ \item Liste ist leer/enthält nur ein Element.
+ \item Graph ist Multigraph (enthält Schleifen/Mehrfachkanten).
+ \item Sind Kanten gerichtet/ungerichtet?
+ \item Kollineare Punkte existieren.
+ \item Polygon ist konkav/selbstschneidend.
+ \end{itemize}
+ \item Bei DP/Rekursion: Stimmt Basisfall?
+ \item Unsicher bei benutzten STL-Funktionen?
+ \end{itemize}
+\end{itemize}
diff --git a/content/other/pbs.cpp b/content/other/pbs.cpp
new file mode 100644
index 0000000..7cb60e5
--- /dev/null
+++ b/content/other/pbs.cpp
@@ -0,0 +1,19 @@
+// Q = # of queries, bucket sort is sometimes faster
+vector<int> low(Q, 0), high(Q, MAX_OPERATIONS);
+while (true) {
+ vector<pair<int, int>> focus;
+ for (int i = 0; i < Q; i++) if (low[i] < high[i]) {
+ focus.emplace_back((low[i] + high[i]) / 2, i);
+ }
+ if (focus.empty()) break;
+ sort(all(focus));
+
+ // reset simulation
+ for (int step = 0; auto [mid, i] : focus) {
+ while (step <= mid) {
+ // simulation step
+ step++;
+ }
+ if (/* requirement already fulfilled */) high[i] = mid;
+ else low[i] = mid + 1;
+}} // answer in low (and high)
diff --git a/content/other/pragmas.cpp b/content/other/pragmas.cpp
new file mode 100644
index 0000000..a39c850
--- /dev/null
+++ b/content/other/pragmas.cpp
@@ -0,0 +1,6 @@
+#pragma GCC optimize("Ofast")
+#pragma GCC optimize ("unroll-loops")
+#pragma GCC target("sse,sse2,sse3,ssse3,sse4,"
+ "popcnt,abm,mmx,avx,tune=native")
+#pragma GCC target("fpmath=sse,sse2") // no excess precision
+#pragma GCC target("fpmath=387") // force excess precision
diff --git a/content/other/sos.cpp b/content/other/sos.cpp
new file mode 100644
index 0000000..01bc44c
--- /dev/null
+++ b/content/other/sos.cpp
@@ -0,0 +1,6 @@
+vector<ll> res(in);
+for (int i = 1; i < sz(res); i *= 2) {
+ for (int mask = 0; mask < sz(res); mask++){
+ if (mask & i) {
+ res[mask] += res[mask ^ i];
+}}}
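+// Example: in = {1, 2, 3, 4} (size 2^2) gives res = {1, 3, 4, 10},
+// e.g. res[0b11] = in[0b00] + in[0b01] + in[0b10] + in[0b11].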
diff --git a/content/other/split.cpp b/content/other/split.cpp
new file mode 100644
index 0000000..5519f60
--- /dev/null
+++ b/content/other/split.cpp
@@ -0,0 +1,10 @@
+// Splits s at every character contained in delim (modifies s).
+vector<string> split(string& s, string delim) {
+ vector<string> result; char *token;
+ token = strtok(s.data(), delim.c_str());
+ while (token != nullptr) {
+ result.emplace_back(token);
+ token = strtok(nullptr, delim.c_str());
+ }
+ return result;
+}
diff --git a/content/other/stress.sh b/content/other/stress.sh
new file mode 100644
index 0000000..d264c2a
--- /dev/null
+++ b/content/other/stress.sh
@@ -0,0 +1,7 @@
+for i in {1..1000}; do
+ printf "\r$i"
+ python3 gen.py > input # generate test with gen.py
+ ./a.out < input > out # execute ./a.out
+ ./b.out < input > out2 # execute ./b.out
+ diff out out2 || break
+done
diff --git a/content/other/stuff.cpp b/content/other/stuff.cpp
new file mode 100644
index 0000000..41543ad
--- /dev/null
+++ b/content/other/stuff.cpp
@@ -0,0 +1,29 @@
+// Include-everything header.
+#include <bits/stdc++.h>
+
+// Set German keyboard layout / toggle with alt + space
+setxkbmap de
+setxkbmap de,us -option grp:alt_space_toggle
+
+// Fast input/output with cin/cout.
+cin.tie(nullptr)->ios::sync_with_stdio(false);
+
+// Set with a custom comparator.
+set<point2, decltype(comp)> set1(comp);
+
+// STL-Debugging, Compiler flags.
+-D_GLIBCXX_DEBUG
+#define _GLIBCXX_DEBUG
+
+// 128-bit integer/float. Must be cast to int or
+// long long for reading/writing.
+__int128, __float128
+
+// float with decimal representation
+#include <decimal/decimal>
+std::decimal::decimal128
+
+// 1e18 < INF < Max_Value / 2
+constexpr ll INF = 0x3FFF'FFFF'FFFF'FFFFll;
+// 1e9 < INF < Max_Value / 2
+constexpr int INF = 0x3FFF'FFFF;
diff --git a/content/other/timed.cpp b/content/other/timed.cpp
new file mode 100644
index 0000000..b3ed4ef
--- /dev/null
+++ b/content/other/timed.cpp
@@ -0,0 +1,3 @@
+int times = clock();
+//run for 900ms
+while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) {...}
diff --git a/content/python/io.py b/content/python/io.py
new file mode 100644
index 0000000..aa16d4c
--- /dev/null
+++ b/content/python/io.py
@@ -0,0 +1,3 @@
+n, m = map(int, input().split())
+A = list(map(int, input().split()))
+print(n, m, *A)
diff --git a/content/python/python.tex b/content/python/python.tex
new file mode 100644
index 0000000..a778b85
--- /dev/null
+++ b/content/python/python.tex
@@ -0,0 +1,10 @@
+\section{Python}
+\bgroup
+\lstset{language=Python}
+
+\subsection{Recursion}
+\sourcecode{python/recursion.py}
+
+\subsection{IO}
+\sourcecode{python/io.py}
+\egroup
diff --git a/content/python/recursion.py b/content/python/recursion.py
new file mode 100644
index 0000000..45e0147
--- /dev/null
+++ b/content/python/recursion.py
@@ -0,0 +1,2 @@
+import sys
+sys.setrecursionlimit(1000_007)
diff --git a/content/string/ahoCorasick.cpp b/content/string/ahoCorasick.cpp
new file mode 100644
index 0000000..eac312c
--- /dev/null
+++ b/content/string/ahoCorasick.cpp
@@ -0,0 +1,52 @@
+constexpr ll ALPHABET_SIZE = 26, OFFSET = 'a';
+struct AhoCorasick {
+ struct vert {
+ int suffix = 0, ch, cnt = 0;
+ array<int, ALPHABET_SIZE> nxt = {};
+
+ vert(int p, int c) : suffix(-p), ch(c) {}
+ };
+ vector<vert> aho = {{0, -1}};
+
+ int addString(string &s) {
+ int v = 0;
+ for (auto c : s) {
+ int idx = c - OFFSET;
+ if (!aho[v].nxt[idx]) {
+ aho[v].nxt[idx] = sz(aho);
+ aho.emplace_back(v, idx);
+ }
+ v = aho[v].nxt[idx];
+ }
+ aho[v].cnt++;
+ return v; // trie node index of pattern (pattern state)
+ }
+
+ int getSuffix(int v) {
+ if (aho[v].suffix < 0) {
+ aho[v].suffix = go(getSuffix(-aho[v].suffix), aho[v].ch);
+ }
+ return aho[v].suffix;
+ }
+
+ int go(int v, int idx) { // Root is v=0, idx is char - OFFSET
+ if (aho[v].nxt[idx]) return aho[v].nxt[idx];
+ else return v == 0 ? 0 : go(getSuffix(v), idx);
+ }
+
+ vector<vector<int>> adj;
+ vector<ll> dp;
+ void buildGraph() {
+ adj.resize(sz(aho));
+ dp.assign(sz(aho), 0);
+ for (int i = 1; i < sz(aho); i++) {
+ adj[getSuffix(i)].push_back(i);
+ }}
+
+ void dfs(int v = 0) { // dp on tree
+ for (int u : adj[v]) {
+ //dp[u] = dp[v] + aho[u].cnt; // pattern count
+ dfs(u);
+ dp[v] += dp[u]; // no of matches
+ }}
+};
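+
+// Usage sketch (count how often each pattern occurs in text):
+//   AhoCorasick ac;
+//   vector<int> id;
+//   for (auto& p : patterns) id.push_back(ac.addString(p));
+//   ac.buildGraph();
+//   int state = 0;
+//   for (char c : text) {
+//     state = ac.go(state, c - OFFSET);
+//     ac.dp[state]++;
+//   }
+//   ac.dfs();  // now ac.dp[id[i]] = #occurrences of patterns[i]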
diff --git a/content/string/deBruijn.cpp b/content/string/deBruijn.cpp
new file mode 100644
index 0000000..e829137
--- /dev/null
+++ b/content/string/deBruijn.cpp
@@ -0,0 +1,7 @@
+string deBruijn(int n, char mi = '0', char ma = '1') {
+ string res, c(1, mi);
+ do {
+ if (n % sz(c) == 0) res += c;
+ } while(next(c, n, mi, ma));
+ return res;
+}
diff --git a/content/string/duval.cpp b/content/string/duval.cpp
new file mode 100644
index 0000000..bf36cce
--- /dev/null
+++ b/content/string/duval.cpp
@@ -0,0 +1,21 @@
+vector<pair<int, int>> duval(const string& s) {
+ vector<pair<int, int>> res;
+ for (int i = 0; i < sz(s);) {
+ int j = i + 1, k = i;
+ for (; j < sz(s) && s[k] <= s[j]; j++) {
+ if (s[k] < s[j]) k = i;
+ else k++;
+ }
+ while (i <= k) {
+ res.push_back({i, i + j - k});
+ i += j - k;
+ }}
+ return res;
+}
+
+int minrotation(const string& s) {
+ auto parts = duval(s+s);
+ for (auto [l, r] : parts) {
+ if (l < sz(s) && r >= sz(s)) {
+ return l;
+}}}
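+// Example: duval("bcabca") = {{0,2},{2,5},{5,6}} ("bc","abc","a");
+// minrotation("bca") == 2 (smallest rotation "abc").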
diff --git a/content/string/kmp.cpp b/content/string/kmp.cpp
new file mode 100644
index 0000000..421479e
--- /dev/null
+++ b/content/string/kmp.cpp
@@ -0,0 +1,20 @@
+vector<int> kmpPreprocessing(const string& sub) {
+ vector<int> b(sz(sub) + 1);
+ b[0] = -1;
+ for (int i = 0, j = -1; i < sz(sub);) {
+ while (j >= 0 && sub[i] != sub[j]) j = b[j];
+ b[++i] = ++j;
+ }
+ return b;
+}
+vector<int> kmpSearch(const string& s, const string& sub) {
+ vector<int> result, pre = kmpPreprocessing(sub);
+ for (int i = 0, j = 0; i < sz(s);) {
+ while (j >= 0 && s[i] != sub[j]) j = pre[j];
+ i++; j++;
+ if (j == sz(sub)) {
+ result.push_back(i - j);
+ j = pre[j];
+ }}
+ return result;
+}
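+// Example: kmpSearch("abababc", "aba") returns {0, 2}.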
diff --git a/content/string/longestCommonSubsequence.cpp b/content/string/longestCommonSubsequence.cpp
new file mode 100644
index 0000000..6c9ea44
--- /dev/null
+++ b/content/string/longestCommonSubsequence.cpp
@@ -0,0 +1,15 @@
+string lcss(const string& a, const string& b) {
+ vector<vector<int>> m(sz(a) + 1, vector<int>(sz(b) + 1));
+ for (int i = sz(a) - 1; i >= 0; i--) {
+ for (int j = sz(b) - 1; j >= 0; j--) {
+ if (a[i] == b[j]) m[i][j] = 1 + m[i+1][j+1];
+ else m[i][j] = max(m[i+1][j], m[i][j+1]);
+ }} // For just the length: return m[0][0];
+ string res;
+ for (int j = 0, i = 0; j < sz(b) && i < sz(a);) {
+ if (a[i] == b[j]) res += a[i++], j++;
+ else if (m[i][j+1] > m[i+1][j]) j++;
+ else i++;
+ }
+ return res;
+}
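+// Example: lcss("abcde", "ace") == "ace".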
diff --git a/content/string/lyndon.cpp b/content/string/lyndon.cpp
new file mode 100644
index 0000000..e44379b
--- /dev/null
+++ b/content/string/lyndon.cpp
@@ -0,0 +1,11 @@
+bool next(string& s, int maxLen, char mi = '0', char ma = '1') {
+ for (int i = sz(s), j = sz(s); i < maxLen; i++)
+ s.push_back(s[i % j]);
+ while(!s.empty() && s.back() == ma) s.pop_back();
+ if (s.empty()) {
+ s = mi;
+ return false;
+ } else {
+ s.back()++;
+ return true;
+}}
diff --git a/content/string/manacher.cpp b/content/string/manacher.cpp
new file mode 100644
index 0000000..112bd55
--- /dev/null
+++ b/content/string/manacher.cpp
@@ -0,0 +1,20 @@
+vector<int> manacher(const string& t) {
+ //transforms "aa" to ".a.a." to find even length palindromes
+ string s(sz(t) * 2 + 1, '.');
+ for (int i = 0; i < sz(t); i++) s[2 * i + 1] = t[i];
+
+ int mid = 0, r = 0, n = sz(s);
+ vector<int> pal(n);
+ for (int i = 1; i < n - 1; i++) {
+ if (r > i) pal[i] = min(r - i, pal[2 * mid - i]);
+ while (pal[i] < min(i, n - i - 1) &&
+ s[i + pal[i] + 1] == s[i - pal[i] - 1]) {
+ pal[i]++;
+ }
+ if (i + pal[i] > r) mid = i, r = i + pal[i];
+ }
+
+ //convert to palindrome lengths in the transformed string s (optional)
+ //for (int i = 0; i < n; i++) pal[i] = 2 * pal[i] + 1;
+ return pal;
+}
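+
+// Example: manacher("abba") returns {0,1,0,1,4,1,0,1,0};
+// pal[i] is the palindrome length in t centered at transformed index i,
+// the maximum (4) corresponds to the longest palindrome "abba".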
diff --git a/content/string/rollingHash.cpp b/content/string/rollingHash.cpp
new file mode 100644
index 0000000..6e914aa
--- /dev/null
+++ b/content/string/rollingHash.cpp
@@ -0,0 +1,18 @@
+// M = 1.7e9 + 9, 1e18L + 9, 2.2e18L + 7
+struct Hash {
+ static constexpr ll M = 3e18L + 37;
+ static constexpr ll Q = 318LL << 53; // Random in [SIGMA+1, M)
+ vector<ll> pref = {0}, power = {1};
+
+ Hash(const string& s) {
+ for (auto c : s) { // c > 0
+ pref.push_back((mul(pref.back(), Q) + c + M) % M);
+ power.push_back(mul(power.back(), Q));
+ }}
+
+ ll operator()(int l, int r) {
+ return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
+ }
+
+ static ll mul(__int128 a, ll b) {return a * b % M;}
+};
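+
+// Usage sketch: Hash h(s); h(l, r) hashes the half-open range s[l..r).
+//   Hash h("abcab");
+//   h(0, 2) == h(3, 5);  // "ab" vs "ab" -> equal hashes
+// Equal substrings always hash equal; different ones collide only
+// with probability ~1/M per comparison.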
diff --git a/content/string/rollingHashCf.cpp b/content/string/rollingHashCf.cpp
new file mode 100644
index 0000000..84b2e4e
--- /dev/null
+++ b/content/string/rollingHashCf.cpp
@@ -0,0 +1,17 @@
+// M = 1.7e9 + 9, 1e18L + 9, 2.2e18L + 7
+struct Hash {
+ static constexpr ll M = 3e18L + 37;
+ vector<ll> pref = {0}, power = {1};
+
+ Hash(const string& s, ll Q) { // Q Random in [SIGMA+1, M)
+ for (auto c : s) { // c > 0
+ pref.push_back((mul(pref.back(), Q) + c + M) % M);
+ power.push_back(mul(power.back(), Q));
+ }}
+
+ ll operator()(int l, int r) {
+ return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
+ }
+
+ static ll mul(__int128 a, ll b) {return a * b % M;}
+};
diff --git a/content/string/string.tex b/content/string/string.tex
new file mode 100644
index 0000000..bedabfb
--- /dev/null
+++ b/content/string/string.tex
@@ -0,0 +1,132 @@
+\section{Strings}
+
+\begin{algorithm}{\textsc{Knuth-Morris-Pratt} Algorithm}
+ \begin{methods}
+ \method{kmpSearch}{searches for \code{sub} in \code{s}}{\abs{s}+\abs{sub}}
+ \end{methods}
+ \sourcecode{string/kmp.cpp}
+\end{algorithm}
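+A minimal usage sketch (occurrences may overlap; indices are 0-based):
+// start indices of all occurrences of "aba" in "ababa"
+vector<int> occ = kmpSearch("ababa", "aba"); // occ == {0, 2}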
+
+\begin{algorithm}{Z-Algorithm}
+ \begin{methods}[ll]
+ $z_i\coloneqq$ longest common prefix of $s_0\cdots s_{n-1}$ and $s_i\cdots s_{n-1}$ & \runtime{n}
+ \end{methods}
+ Searching: run the Z-algorithm on \code{P\$S} and return all positions with $z_i=\abs{P}$ (see the sketch below).
+ \sourcecode{string/z.cpp}
+\end{algorithm}
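+A sketch of the search just described (not reference code), using \code{Z} from
+\code{string/z.cpp}; the separator \code{'\$'} must occur in neither string.
+vector<int> matchPositions(const string& S, const string& P) {
+  vector<int> z = Z(P + '$' + S), res;
+  for (int i = 0; i + sz(P) <= sz(S); i++) {
+    if (z[i + sz(P) + 1] == sz(P)) res.push_back(i);
+  }
+  return res;
+}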
+
+\begin{algorithm}{Rolling Hash}
+ \sourcecode{string/rollingHash.cpp}
+\end{algorithm}
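+A usage sketch (intervals are half-open): for a \code{string s} and indices
+\code{a, b, len}, substring equality can be tested in $\Theta(1)$ with the
+usual small collision probability.
+Hash h(s);
+bool eq = h(a, a + len) == h(b, b + len); // s[a, a+len) == s[b, b+len)?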
+
+\begin{algorithm}{Pattern Matching with Wildcards}
+ Given two strings $A$ and $B$, where $B$ contains $k$ \emph{wildcards}. Define:
+ \begin{align*}
+ a_i&=\cos(\alpha_i) + i\sin(\alpha_i) &\text{ with } \alpha_i&=\frac{2\pi A[i]}{\Sigma}\\
+ b_i&=\cos(\beta_i) - i\sin(\beta_i) &\text{ with } \beta_i&=\begin{cases*}
+ \frac{2\pi B[\abs{B}-i-1]}{\Sigma} & if $B[\abs{B}-i-1]\in\Sigma$ \\
+ 0 & otherwise
+ \end{cases*}
+ \end{align*}
+ Note the complex conjugate in $b_i$: a matching pair of characters contributes exactly $1$.
+ $B$ matches $A$ at position $i$ iff $(b\cdot a)[\abs{B}-1+i]=\abs{B}-k$.
+ Use FFT to compute the convolution $(b\cdot a)$.
+\end{algorithm}
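+A minimal sketch of this technique (not reference code). \code{conv} is a
+hypothetical complex linear convolution, e.g. built on the FFT from the math
+chapter; \code{'*'} marking a wildcard is an assumption, and characters are
+assumed to lie in a range of size \code{SIGMA} (e.g. \code{'a'..'z'}, \code{SIGMA = 26}).
+vector<int> wildcardMatch(const string& A, const string& B, int SIGMA) {
+  using cd = complex<double>;
+  ll k = count(all(B), '*');
+  vector<cd> a(sz(A)), b(sz(B));
+  for (int i = 0; i < sz(A); i++)
+    a[i] = polar(1.0, 2 * M_PI * (A[i] % SIGMA) / SIGMA);
+  for (int i = 0; i < sz(B); i++) {
+    char c = B[sz(B) - 1 - i]; // b is B reversed
+    b[i] = c == '*' ? cd(0) : polar(1.0, -2 * M_PI * (c % SIGMA) / SIGMA);
+  }
+  vector<cd> prod = conv(a, b); // prod[m] = sum_j b[j] * a[m - j]
+  // A full match differs from |B|-k by at least 1 - cos(2*pi/SIGMA),
+  // so the FFT error must stay below eps.
+  double eps = (1 - cos(2 * M_PI / SIGMA)) / 2;
+  vector<int> res;
+  for (int i = 0; i + sz(B) <= sz(A); i++)
+    if (abs(prod[i + sz(B) - 1] - cd(sz(B) - k)) < eps) res.push_back(i);
+  return res;
+}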
+
+\begin{algorithm}{\textsc{Manacher}'s Algorithm, Longest Palindrome}
+ \begin{methods}
+ \method{manacher}{transforms \code{t} and computes the palindrome length around every center of the transformed string}{n}
+ \end{methods}
+ \sourcecode{string/manacher.cpp}
+\end{algorithm}
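+A usage sketch for an input \code{string t}: \code{pal[i]} is both the radius in
+the transformed string and the length of the corresponding palindrome in
+\code{t}, which starts at \code{(i - pal[i]) / 2}.
+vector<int> pal = manacher(t);
+int i = max_element(all(pal)) - begin(pal);
+string longest = t.substr((i - pal[i]) / 2, pal[i]); // longest palindrome in t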
+
+\begin{algorithm}{Longest Common Subsequence}
+ \begin{methods}
+ \method{lcss}{finds a longest common subsequence}{\abs{a}\*\abs{b}}
+ \end{methods}
+ \sourcecode{string/longestCommonSubsequence.cpp}
+\end{algorithm}
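+A small sanity check (for this pair the LCS happens to be unique):
+assert(lcss("XMJYAUZ", "MZJAWXU") == "MJAU"); // length 4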
+
+\columnbreak
+\begin{algorithm}{\textsc{Aho-Corasick} Automaton}
+ \begin{methods}[ll]
+ searches for multiple patterns in a text & \runtime{\abs{Text}+\sum\abs{pattern}}
+ \end{methods}
+ \begin{enumerate}
+ \item add each pattern with \code{addString(pattern, idx)}.
+ \item call \code{buildGraph()}
+ \item move to the next state with \code{state = go(state, idx)}.
+ \item while doing so, increment \code{dp[state]++}
+ \item call \code{dfs()}. Afterwards \code{dp[pattern state]} holds the number of matches (see the sketch below).
+ \end{enumerate}
+ \sourcecode{string/ahoCorasick.cpp}
+\end{algorithm}
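+A hypothetical usage sketch following the steps above (exact signatures are in
+\code{ahoCorasick.cpp}; the character mapping \code{c - 'a'} is an assumption):
+vector<string> patterns = {"he", "she", "his"};
+for (int i = 0; i < sz(patterns); i++) addString(patterns[i], i);
+buildGraph();
+string text = "hishers";
+int state = 0;
+for (char c : text) dp[state = go(state, c - 'a')]++;
+dfs(); // afterwards dp at each pattern's state holds its match count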
+\clearpage
+
+\begin{algorithm}{Lyndon and De Bruijn}
+ \begin{itemize}
+ \item \textbf{Lyndon word:} a word that is lexicographically smaller than every one of its nontrivial rotations.
+ \item Every word has a \emph{unique} factorization into a non-increasing sequence of Lyndon words.
+ \item For Lyndon words $u, v$ with $u<v$, the concatenation $uv$ is also a Lyndon word.
+ \end{itemize}
+ \begin{methods}
+ \method[, average $\Theta(1)$]{next}{lexicographically next Lyndon word}{n}
+ \method{duval}{factorizes $s$ into Lyndon words}{n}
+ \method{minrotation}{computes the smallest rotation of $s$}{n}
+ \end{methods}
+ \sourcecode{string/lyndon.cpp}
+ \sourcecode{string/duval.cpp}
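+ A usage sketch for \code{next}: starting from the smallest word, it enumerates
+ all Lyndon words of length at most \code{maxLen} in lexicographic order
+ (concatenating those whose length divides $n$ yields $B(\Sigma, n)$).
+string s = "0";
+do cout << s << ' '; while (next(s, 4));
+// prints: 0 0001 001 0011 01 011 0111 1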
+ \begin{itemize}
+ \item \textbf{De Bruijn sequence $\boldsymbol{B(\Sigma, n)}$:}~~~a word (of minimal length) that, read cyclically, contains every word of length $n$ over $\Sigma$ exactly once as a substring.
+ \item there are $\frac{(k!)^{k^{n-1}}}{k^{n}}$ distinct $B(\Sigma, n)$, where $k=\abs{\Sigma}$
+ \item $B(\Sigma, n)$ has length $\abs{\Sigma}^n$
+ \end{itemize}
+ \begin{methods}
+ \method{deBruijn}{computes one fixed $B(\Sigma, n)$}{\abs{\Sigma}^n}
+ \end{methods}
+ \sourcecode{string/deBruijn.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Suffix-Array}
+\begin{methods}
+ \method{SuffixArray}{builds a suffix array}{\abs{s}\*\log^2(\abs{s})}
+ \method{lcp}{computes the length of the longest common prefix}{\log(\abs{s})}
+ \method{}{of the suffixes starting at \code{x} and \code{y}}{}
+\end{methods}
+\sourcecode{string/suffixArray.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Suffix Tree}
+ \begin{methods}
+ \method{SuffixTree}{builds a suffix tree}{\abs{s}}
+ \method{extend}{appends the next character of \code{s}}{1}
+ \end{methods}
+ \sourcecode{string/suffixTree.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Suffix-Automaton}
+ \begin{itemize}
+ \item \textbf{Is \textit{w} a substring of \textit{s}?}
+ Build the automaton for \textit{s} and run it on \textit{w}.
+ If every transition exists, \textit{w} is a substring of \textit{s}.
+
+ \item \textbf{Is \textit{w} a suffix of \textit{s}?}
+ As above, and additionally check whether the final state is a terminal.
+
+ \item \textbf{Number of distinct substrings.}
+ Every path in the automaton corresponds to a substring.
+ For a node, the number of outgoing paths equals the sum over its children plus one;
+ the extra summand is the path that ends in this node (see the sketch below).
+
+ \item \textbf{How often does \textit{w} occur in \textit{s}?}
+ Let \textit{p} be the state reached after processing \textit{w}.
+ The answer is the number of paths that start in \textit{p} and end in a terminal.
+ This number can be computed recursively as above,
+ but a node only contributes the extra one if it is a terminal.
+ \end{itemize}
+ \sourcecode{string/suffixAutomaton.cpp}
+\end{algorithm}
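+A sketch of the substring-counting rule above (assumed helper, not part of the
+struct): memoized path counting over the transition DAG.
+ll countDistinct(SuffixAutomaton& sa) {
+  vector<ll> dp(sz(sa.st), -1);
+  auto dfs = [&](auto&& self, int v) -> ll {
+    if (dp[v] >= 0) return dp[v];
+    ll res = 1; // the path that ends in this state
+    for (int u : sa.st[v].nxt) if (u >= 0) res += self(self, u);
+    return dp[v] = res;
+  };
+  return dfs(dfs, 0) - 1; // subtract the empty path at the root
+}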
+
+\begin{algorithm}{Trie}
+ \sourcecode{string/trie.cpp}
+\end{algorithm}
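+A hypothetical usage sketch for the binary trie (\code{ALPHABET_SIZE = 2}),
+storing integers bit by bit from the most significant bit:
+int x = 42;
+vector<int> bits(30);
+for (int b = 0; b < 30; b++) bits[b] = x >> (29 - b) & 1;
+insert(bits);               // returns the id of the word's final node
+bool removed = erase(bits); // true, the word was present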
diff --git a/content/string/suffixArray.cpp b/content/string/suffixArray.cpp
new file mode 100644
index 0000000..8b698d2
--- /dev/null
+++ b/content/string/suffixArray.cpp
@@ -0,0 +1,38 @@
+constexpr int MAX_CHAR = 256;
+struct SuffixArray {
+ int n;
+ vector<int> SA, LCP;
+ vector<vector<int>> P;
+
+ SuffixArray(const string& s) : n(sz(s)), SA(n), LCP(n),
+ P(__lg(2 * n - 1) + 1, vector<int>(n)) {
+ P[0].assign(all(s));
+ iota(all(SA), 0);
+ sort(all(SA), [&](int a, int b) {return s[a] < s[b];});
+ vector<int> x(n);
+ for (int k = 1, c = 1; c < n; k++, c *= 2) {
+ iota(all(x), n - c);
+ for (int ptr = c; int i : SA) if (i >= c) x[ptr++] = i - c;
+
+ vector<int> cnt(k == 1 ? MAX_CHAR : n);
+ for (int i : P[k-1]) cnt[i]++;
+ partial_sum(all(cnt), begin(cnt));
+ for (int i : x | views::reverse) SA[--cnt[P[k-1][i]]] = i;
+
+ auto p = [&](int i) {return i < n ? P[k-1][i] : -1;};
+ for (int i = 1; i < n; i++) {
+ int a = SA[i-1], b = SA[i];
+ P[k][b] = P[k][a] + (p(a) != p(b) || p(a+c) != p(b+c));
+ }}
+ for (int i = 1; i < n; i++) LCP[i] = lcp(SA[i-1], SA[i]);
+ }
+
+ int lcp(int x, int y) {//x & y are text-indices, not SA-indices
+ if (x == y) return n - x;
+ int res = 0;
+ for (int i = sz(P) - 1; i >= 0 && max(x, y) + res < n; i--) {
+ if (P[i][x + res] == P[i][y + res]) res |= 1 << i;
+ }
+ return res;
+ }
+};
diff --git a/content/string/suffixAutomaton.cpp b/content/string/suffixAutomaton.cpp
new file mode 100644
index 0000000..9a68cb3
--- /dev/null
+++ b/content/string/suffixAutomaton.cpp
@@ -0,0 +1,63 @@
+constexpr int ALPHABET_SIZE = 26;
+constexpr char OFFSET = 'a';
+struct SuffixAutomaton {
+ struct State {
+ int len, link = -1;
+ array<int, ALPHABET_SIZE> nxt; // map if large Alphabet
+ State(int l) : len(l) {fill(all(nxt), -1);}
+ };
+
+ vector<State> st = {State(0)};
+ int cur = 0;
+
+ SuffixAutomaton(const string& s) {
+ st.reserve(2 * sz(s));
+ for (auto c : s) extend(c - OFFSET);
+ }
+
+ void extend(int c) {
+ int p = cur;
+ cur = sz(st);
+ st.emplace_back(st[p].len + 1);
+ for (; p != -1 && st[p].nxt[c] < 0; p = st[p].link) {
+ st[p].nxt[c] = cur;
+ }
+ if (p == -1) {
+ st[cur].link = 0;
+ } else {
+ int q = st[p].nxt[c];
+ if (st[p].len + 1 == st[q].len) {
+ st[cur].link = q;
+ } else {
+ st.emplace_back(st[p].len + 1);
+ st.back().link = st[q].link;
+ st.back().nxt = st[q].nxt;
+ for (; p != -1 && st[p].nxt[c] == q; p = st[p].link) {
+ st[p].nxt[c] = sz(st) - 1;
+ }
+ st[q].link = st[cur].link = sz(st) - 1;
+ }}}
+
+ vector<int> calculateTerminals() {
+ vector<int> terminals;
+ for (int p = cur; p != -1; p = st[p].link) {
+ terminals.push_back(p);
+ }
+ return terminals;
+ }
+
+ // Pair with start index (in t) and length of LCS.
+ pair<int, int> longestCommonSubstring(const string& t) {
+ int v = 0, l = 0, best = 0, bestp = -1;
+ for (int i = 0; i < sz(t); i++) {
+ int c = t[i] - OFFSET;
+ while (v > 0 && st[v].nxt[c] < 0) {
+ v = st[v].link;
+ l = st[v].len;
+ }
+ if (st[v].nxt[c] >= 0) v = st[v].nxt[c], l++;
+ if (l > best) best = l, bestp = i;
+ }
+ return {bestp - best + 1, best};
+ }
+};
diff --git a/content/string/suffixTree.cpp b/content/string/suffixTree.cpp
new file mode 100644
index 0000000..7112f39
--- /dev/null
+++ b/content/string/suffixTree.cpp
@@ -0,0 +1,72 @@
+struct SuffixTree {
+ struct Vert {
+ int start, end, suf; //s[start...end) along parent edge
+ map<char, int> nxt;
+ };
+ string s;
+ int needsSuffix, pos, remainder, curVert, curEdge, curLen;
+ // Each Vertex gives its children range as [start, end)
+ vector<Vert> tree = {Vert{-1, -1, 0, {}}};
+
+ SuffixTree(const string& s_) : s(s_) {
+ needsSuffix = remainder = curVert = curEdge = curLen = 0;
+ pos = -1;
+ for (int i = 0; i < sz(s); i++) extend();
+ }
+
+ int newVert(int start, int end) {
+ tree.push_back({start, end, 0, {}});
+ return sz(tree) - 1;
+ }
+
+ void addSuffixLink(int vert) {
+ if (needsSuffix) tree[needsSuffix].suf = vert;
+ needsSuffix = vert;
+ }
+
+ bool fullImplicitEdge(int vert) {
+ int len = min(tree[vert].end, pos + 1) - tree[vert].start;
+ if (curLen >= len) {
+ curEdge += len;
+ curLen -= len;
+ curVert = vert;
+ return true;
+ } else {
+ return false;
+ }}
+
+ void extend() {
+ pos++;
+ needsSuffix = 0;
+ remainder++;
+ while (remainder) {
+ if (curLen == 0) curEdge = pos;
+ if (!tree[curVert].nxt.count(s[curEdge])) {
+ int leaf = newVert(pos, sz(s));
+ tree[curVert].nxt[s[curEdge]] = leaf;
+ addSuffixLink(curVert);
+ } else {
+ int nxt = tree[curVert].nxt[s[curEdge]];
+ if (fullImplicitEdge(nxt)) continue;
+ if (s[tree[nxt].start + curLen] == s[pos]) {
+ curLen++;
+ addSuffixLink(curVert);
+ break;
+ }
+ int split = newVert(tree[nxt].start,
+ tree[nxt].start + curLen);
+ tree[curVert].nxt[s[curEdge]] = split;
+ int leaf = newVert(pos, sz(s));
+ tree[split].nxt[s[pos]] = leaf;
+ tree[nxt].start += curLen;
+ tree[split].nxt[s[tree[nxt].start]] = nxt;
+ addSuffixLink(split);
+ }
+ remainder--;
+ if (curVert == 0 && curLen) {
+ curLen--;
+ curEdge = pos - remainder + 1;
+ } else {
+ curVert = tree[curVert].suf ? tree[curVert].suf : 0;
+ }}}
+}; \ No newline at end of file
diff --git a/content/string/trie.cpp b/content/string/trie.cpp
new file mode 100644
index 0000000..03cf947
--- /dev/null
+++ b/content/string/trie.cpp
@@ -0,0 +1,35 @@
+// Symbol values must start at 0 and be contiguous (in [0, ALPHABET_SIZE)).
+constexpr int ALPHABET_SIZE = 2;
+struct node {
+ int words, ends;
+ array<int, ALPHABET_SIZE> nxt;
+ node() : words(0), ends(0) {fill(all(nxt), -1);}
+};
+vector<node> trie = {node()};
+
+int traverse(const vector<int>& word, int x) {
+ int id = 0;
+ for (int c : word) {
+ if (id < 0 || (trie[id].words == 0 && x <= 0)) return -1;
+ trie[id].words += x;
+ if (trie[id].nxt[c] < 0 && x > 0) {
+ trie[id].nxt[c] = sz(trie);
+ trie.emplace_back();
+ }
+ id = trie[id].nxt[c];
+ }
+ if (id < 0) return -1; // missing edge on the path (only possible for x <= 0)
+ trie[id].words += x;
+ trie[id].ends += x;
+ return id;
+}
+
+int insert(const vector<int>& word) {
+ return traverse(word, 1);
+}
+
+bool erase(const vector<int>& word) {
+ int id = traverse(word, 0);
+ if (id < 0 || trie[id].ends <= 0) return false;
+ traverse(word, -1);
+ return true;
+}
diff --git a/content/string/z.cpp b/content/string/z.cpp
new file mode 100644
index 0000000..069fa38
--- /dev/null
+++ b/content/string/z.cpp
@@ -0,0 +1,10 @@
+vector<int> Z(const string& s) {
+ int n = sz(s);
+ vector<int> z(n);
+ for (int i = 1, x = 0; i < n; i++) {
+ z[i] = max(0, min(z[i - x], x + z[x] - i));
+ while (i + z[i] < n && s[z[i]] == s[i + z[i]]) {
+ x = i, z[i]++;
+ }}
+ return z;
+}
diff --git a/content/tcr.tex b/content/tcr.tex
new file mode 100644
index 0000000..b327b37
--- /dev/null
+++ b/content/tcr.tex
@@ -0,0 +1,65 @@
+
+%maybe size 9pt if too many pages
+\documentclass[a4paper,fontsize=7.8pt]{scrartcl}
+
+% General information.
+\newcommand{\teamname}{Kindergarten Timelimit}
+\newcommand{\university}{Karlsruhe Institute of Technology}
+
+% Options
+\newif\ifoptional
+%\optionaltrue
+
+% Font encoding.
+\usepackage[T1]{fontenc}
+\usepackage[ngerman]{babel}
+\usepackage[utf8]{inputenc}
+\usepackage[hidelinks,pdfencoding=auto]{hyperref}
+
+% Include headers.
+\usepackage{latexHeaders/layout}
+\usepackage{latexHeaders/math}
+\usepackage{latexHeaders/code}
+\usepackage{latexHeaders/commands}
+
+% Title and author information.
+\title{Team Contest Reference}
+\author{\teamname \\ \university}
+\date{\today}
+\begin{document}
+
+% Titlepage with table of contents.
+\setlength{\columnsep}{1cm}
+\optional{
+\maketitle
+\begin{multicols*}{3}
+ \tableofcontents
+\end{multicols*}
+}
+
+\newpage
+
+% Content.
+\begin{multicols*}{3}
+ \input{datastructures/datastructures}
+ \input{graph/graph}
+ \input{geometry/geometry}
+ \input{math/math}
+\end{multicols*}
+ \clearpage
+ \input{math/tables}
+\begin{multicols*}{3}
+ \input{string/string}
+ \input{python/python}
+ \input{other/other}
+ \input{template/template}
+ \clearpage
+ \ifodd\value{page}
+ \else
+ \null
+ \thispagestyle{empty}
+ \clearpage
+ \fi
+ \input{tests/test}
+\end{multicols*}
+\end{document}
diff --git a/content/template/console.sh b/content/template/console.sh
new file mode 100644
index 0000000..31885e9
--- /dev/null
+++ b/content/template/console.sh
@@ -0,0 +1,2 @@
+alias comp="g++ -std=gnu++17 -O2 -Wall -Wextra -Wconversion -Wshadow"
+alias dbg="comp -g -fsanitize=address,undefined"
diff --git a/content/template/template.cpp b/content/template/template.cpp
new file mode 100644
index 0000000..c9a492c
--- /dev/null
+++ b/content/template/template.cpp
@@ -0,0 +1,17 @@
+#include <bits/stdc++.h>
+using namespace std;
+
+#define tsolve int t; cin >> t; while(t--) solve
+#define all(x) ::begin(x), ::end(x)
+#define sz(x) (ll)::size(x)
+
+using ll = long long;
+using ld = long double;
+
+void solve() {}
+
+int main() {
+ cin.tie(0)->sync_with_stdio(false);
+ cout << setprecision(16);
+ solve();
+}
diff --git a/content/template/template.tex b/content/template/template.tex
new file mode 100644
index 0000000..bf82199
--- /dev/null
+++ b/content/template/template.tex
@@ -0,0 +1,9 @@
+\section{Template}
+
+\begin{algorithm}{C++}
+ \sourcecode{template/template.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Console}
+ \sourcecode{template/console.sh}
+\end{algorithm}
diff --git a/content/tests/gcc5bug.cpp b/content/tests/gcc5bug.cpp
new file mode 100644
index 0000000..f49603e
--- /dev/null
+++ b/content/tests/gcc5bug.cpp
@@ -0,0 +1,4 @@
+//https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68203
+struct A {
+ pair<int, int> values[1000000];
+};
diff --git a/content/tests/precision.cpp b/content/tests/precision.cpp
new file mode 100644
index 0000000..0c81ae1
--- /dev/null
+++ b/content/tests/precision.cpp
@@ -0,0 +1,8 @@
+#include <cfloat>
+#include <cstdlib>
+#include <iostream>
+using namespace std;
+
+int main() {
+ cout << "Mode: " << FLT_EVAL_METHOD << endl;
+ double a = atof("1.2345678");
+ double b = a*a;
+ cout << b - 1.52415765279683990130 << '\n';
+}
diff --git a/content/tests/test.tex b/content/tests/test.tex
new file mode 100644
index 0000000..80ac037
--- /dev/null
+++ b/content/tests/test.tex
@@ -0,0 +1,43 @@
+\section{Tests}
+This section only contains things that should be tested during the practice session!
+
+\subsection{GCC}
+\begin{itemize}
+ \item are C++14 features available?
+ \item are C++17 features available?
+ \item does this code compile:
+\end{itemize}
+\sourcecode{tests/gcc5bug.cpp}
+\begin{itemize}
+ \item does \code{__int128} work?
+ \item do pragmas work?
+ \item is \code{constexpr} evaluated at compile time (+ time limit)?
+ \item how large is \code{sizeof(char*)}?
+ \item how large is \code{RAND_MAX}?
+ \item does \code{random_device} work (and does it give different results)?
+ \item does \code{clock()} work?
+\end{itemize}
+
+\subsection{Python}
+\begin{itemize}
+ \item recursion limit?
+\end{itemize}
+
+\subsection{Judge}
+\begin{itemize}
+ \item is the checker case-sensitive?
+ \item how is additional whitespace in otherwise correct output handled?
+ \item compare execution time on the judge and locally (e.g. with a prime sieve)
+\end{itemize}
+\sourcecode{tests/whitespace.cpp}
+
+\subsection{Precision}
+\begin{itemize}
+ \item Mode $0$ means no excess precision
+ \item Mode $2$ means excess precision (all operations in $80$\,bit floats)
+\end{itemize}
+\begin{itemize}
+ \item Result $0$ without excess precision (expected floating point error)
+ \item \textasciitilde$8\cdot10^{-17}$ with excess precision (true value)
+\end{itemize}
+\sourcecode{tests/precision.cpp}
diff --git a/content/tests/whitespace.cpp b/content/tests/whitespace.cpp
new file mode 100644
index 0000000..d4abf47
--- /dev/null
+++ b/content/tests/whitespace.cpp
@@ -0,0 +1 @@
+"\r\r\r\n\t \r\n\r"