From 8d11c6c8213f46f0fa19826917c255edd5d43cb1 Mon Sep 17 00:00:00 2001 From: mzuenni Date: Sun, 28 Jul 2024 22:54:40 +0200 Subject: Test (#4) * update * moved content in subdir * rename file * add test setup * add test setup * add github action * automatically test all cpp files * timeout after 10s * set ulimit and don't zero memory * test build pdf * install latexmk * update * update * ngerman * fonts * removed old code * add first test * added tests * test in sorted order * more tests * simplified test * more tests * fix suffix tree * fixes and improvements * done ust lst directly * fix swap * add links to pdf * fix constants * add primorial * add comment * various improvements * more tests * added missing stuff * more tests * fix tests * more tests * more tests * more tests * fix recursion? * test trie * more tests * only use python temporarily for listings * only use python temporarily for listings * more tests * fix longestCommonSubstring * more tests * more tests * made code more similar * fix? * more tests * more tests * more tests * add ahoCorasick test + limit 4GB stack size * more tests * fix test * add additional test * more tests * more tests * fix? * better fix * fix virtual tree * more tests * more tests * recursive closest pair * more tests * decrease limit * new tests * more tests * fix name * more tests * add test * new test * more tests * more tests * more tests * more tests * new test and content * new code * new code * larger tests * fix and test * new test * new test * update pdf * remove comments * new test * more tests * more testcases * more tests * increased limit * more tests * more tests * more tests * new tests * more tests * shortened code * new test * add basic tests for bigint * more tests * removed old files * new test * ignore some files * more auto more ccw * fix test * more tests * fix * new tests * more tests * more tests * stronger test * actually verify delaunay... * more tests * fix header * more tests * run tests in parallel? * test parallel? * add --missing * separate workflows * test * is the pdf checked?
* separate workflows * fix workflow * more workflows --------- Co-authored-by: Yidi --- .github/workflows/list_missing.yml | 9 + .github/workflows/test_all.yml | 14 + .github/workflows/test_datastructures.yml | 22 + .github/workflows/test_geometry.yml | 22 + .github/workflows/test_graph.yml | 22 + .github/workflows/test_math.yml | 22 + .github/workflows/test_other.yml | 22 + .github/workflows/test_pdf.yml | 25 ++ .github/workflows/test_string.yml | 22 + .github/workflows/test_template.yml | 22 + .gitignore | 5 + Makefile | 5 +- content/datastructures/LCT.cpp | 178 ++++++++ content/datastructures/bitset.cpp | 7 + content/datastructures/datastructures.tex | 121 ++++++ content/datastructures/dynamicConvexHull.cpp | 36 ++ content/datastructures/fenwickTree.cpp | 15 + content/datastructures/fenwickTree2.cpp | 21 + content/datastructures/lazyPropagation.cpp | 85 ++++ content/datastructures/lichao.cpp | 46 +++ content/datastructures/monotonicConvexHull.cpp | 27 ++ content/datastructures/pbds.cpp | 18 + content/datastructures/persistent.cpp | 18 + content/datastructures/persistentArray.cpp | 24 ++ content/datastructures/segmentTree.cpp | 42 ++ content/datastructures/sparseTable.cpp | 24 ++ content/datastructures/sparseTableDisjoint.cpp | 27 ++ content/datastructures/stlHashMap.cpp | 17 + content/datastructures/stlPriorityQueue.cpp | 8 + content/datastructures/stlRope.cpp | 8 + content/datastructures/stlTree.cpp | 13 + content/datastructures/treap.cpp | 79 ++++ content/datastructures/treap2.cpp | 79 ++++ content/datastructures/unionFind.cpp | 26 ++ content/datastructures/waveletTree.cpp | 40 ++ content/geometry/antipodalPoints.cpp | 12 + content/geometry/circle.cpp | 33 ++ content/geometry/closestPair.cpp | 27 ++ content/geometry/convexHull.cpp | 18 + content/geometry/delaunay.cpp | 124 ++++++ content/geometry/formulas.cpp | 42 ++ content/geometry/formulas3d.cpp | 53 +++ content/geometry/geometry.tex | 62 +++ content/geometry/hpi.cpp | 68 ++++ content/geometry/lines.cpp | 33 ++ content/geometry/linesAndSegments.cpp | 89 ++++ content/geometry/polygon.cpp | 150 +++++++ content/geometry/segmentIntersection.cpp | 63 +++ content/geometry/sortAround.cpp | 11 + content/geometry/spheres.cpp | 29 ++ content/geometry/triangle.cpp | 43 ++ content/geometry/triangle.tex | 41 ++ content/graph/2sat.cpp | 31 ++ content/graph/LCA_sparse.cpp | 32 ++ content/graph/TSP.cpp | 29 ++ content/graph/articulationPoints.cpp | 43 ++ content/graph/bellmannFord.cpp | 19 + content/graph/bitonicTSP.cpp | 31 ++ content/graph/bitonicTSPsimple.cpp | 27 ++ content/graph/blossom.cpp | 82 ++++ content/graph/bronKerbosch.cpp | 24 ++ content/graph/centroid.cpp | 21 + content/graph/connect.cpp | 31 ++ content/graph/cycleCounting.cpp | 64 +++ content/graph/dfs.tex | 16 + content/graph/dijkstra.cpp | 21 + content/graph/dinicScaling.cpp | 51 +++ content/graph/euler.cpp | 23 ++ content/graph/floydWarshall.cpp | 27 ++ content/graph/graph.tex | 269 +++++++++++++ content/graph/havelHakimi.cpp | 18 + content/graph/hld.cpp | 44 ++ content/graph/hopcroftKarp.cpp | 47 +++ content/graph/kruskal.cpp | 9 + content/graph/matching.cpp | 23 ++ content/graph/maxCarBiMatch.cpp | 25 ++ content/graph/maxWeightBipartiteMatching.cpp | 50 +++ content/graph/minCostMaxFlow.cpp | 66 +++ content/graph/pushRelabel.cpp | 64 +++ content/graph/reroot.cpp | 62 +++ content/graph/scc.cpp | 32 ++ content/graph/stoerWagner.cpp | 53 +++ content/graph/treeIsomorphism.cpp | 15 + content/graph/virtualTree.cpp | 22 + content/latexHeaders/code.sty | 141 +++++++ 
content/latexHeaders/commands.sty | 56 +++ content/latexHeaders/layout.sty | 82 ++++ content/latexHeaders/math.sty | 98 +++++ content/math/berlekampMassey.cpp | 31 ++ content/math/bigint.cpp | 271 +++++++++++++ content/math/binomial0.cpp | 14 + content/math/binomial1.cpp | 8 + content/math/binomial2.cpp | 32 ++ content/math/binomial3.cpp | 10 + content/math/chineseRemainder.cpp | 14 + content/math/cycleDetection.cpp | 18 + content/math/discreteLogarithm.cpp | 17 + content/math/discreteNthRoot.cpp | 5 + content/math/divisors.cpp | 11 + content/math/extendedEuclid.cpp | 6 + content/math/gauss.cpp | 36 ++ content/math/gcd-lcm.cpp | 2 + content/math/goldenSectionSearch.cpp | 15 + content/math/inversions.cpp | 9 + content/math/inversionsMerge.cpp | 27 ++ content/math/kthperm.cpp | 14 + content/math/legendre.cpp | 4 + content/math/lgsFp.cpp | 26 ++ content/math/linearCongruence.cpp | 5 + content/math/linearRecurence.cpp | 33 ++ content/math/linearSieve.cpp | 50 +++ content/math/longestIncreasingSubsequence.cpp | 17 + content/math/math.tex | 525 ++++++++++++++++++++++++ content/math/matrixPower.cpp | 14 + content/math/millerRabin.cpp | 19 + content/math/modExp.cpp | 6 + content/math/modMulIterativ.cpp | 9 + content/math/modPowIterativ.cpp | 9 + content/math/multInv.cpp | 4 + content/math/permIndex.cpp | 13 + content/math/piLegendre.cpp | 23 ++ content/math/piLehmer.cpp | 52 +++ content/math/polynomial.cpp | 65 +++ content/math/primeSieve.cpp | 16 + content/math/primitiveRoot.cpp | 23 ++ content/math/rho.cpp | 19 + content/math/shortModInv.cpp | 3 + content/math/simpson.cpp | 12 + content/math/sqrtModCipolla.cpp | 14 + content/math/squfof.cpp | 89 ++++ content/math/tables.tex | 18 + content/math/tables/binom.tex | 28 ++ content/math/tables/composite.tex | 27 ++ content/math/tables/nim.tex | 96 +++++ content/math/tables/numbers.tex | 59 +++ content/math/tables/platonic.tex | 39 ++ content/math/tables/probability.tex | 27 ++ content/math/tables/series.tex | 33 ++ content/math/tables/stuff.tex | 32 ++ content/math/tables/twelvefold.tex | 32 ++ content/math/transforms/andTransform.cpp | 8 + content/math/transforms/bitwiseTransforms.cpp | 12 + content/math/transforms/fft.cpp | 23 ++ content/math/transforms/fftMul.cpp | 15 + content/math/transforms/multiplyBitwise.cpp | 8 + content/math/transforms/multiplyFFT.cpp | 12 + content/math/transforms/multiplyNTT.cpp | 8 + content/math/transforms/ntt.cpp | 23 ++ content/math/transforms/orTransform.cpp | 8 + content/math/transforms/seriesOperations.cpp | 56 +++ content/math/transforms/xorTransform.cpp | 10 + content/other/bitOps.cpp | 18 + content/other/compiletime.cpp | 7 + content/other/divideAndConquer.cpp | 27 ++ content/other/fastIO.cpp | 24 ++ content/other/josephus2.cpp | 8 + content/other/josephusK.cpp | 5 + content/other/knuth.cpp | 15 + content/other/other.tex | 312 ++++++++++++++ content/other/pbs.cpp | 19 + content/other/pragmas.cpp | 6 + content/other/sos.cpp | 6 + content/other/split.cpp | 10 + content/other/stress.sh | 7 + content/other/stuff.cpp | 29 ++ content/other/timed.cpp | 3 + content/python/io.py | 3 + content/python/python.tex | 10 + content/python/recursion.py | 2 + content/string/ahoCorasick.cpp | 52 +++ content/string/deBruijn.cpp | 7 + content/string/duval.cpp | 21 + content/string/kmp.cpp | 20 + content/string/longestCommonSubsequence.cpp | 15 + content/string/lyndon.cpp | 11 + content/string/manacher.cpp | 20 + content/string/rollingHash.cpp | 18 + content/string/rollingHashCf.cpp | 17 + content/string/string.tex | 132 ++++++ 
content/string/suffixArray.cpp | 38 ++ content/string/suffixAutomaton.cpp | 63 +++ content/string/suffixTree.cpp | 72 ++++ content/string/trie.cpp | 35 ++ content/string/z.cpp | 10 + content/tcr.tex | 65 +++ content/template/console.sh | 2 + content/template/template.cpp | 17 + content/template/template.tex | 9 + content/tests/gcc5bug.cpp | 4 + content/tests/precision.cpp | 8 + content/tests/test.tex | 43 ++ content/tests/whitespace.cpp | 1 + datastructures/LCT.cpp | 178 -------- datastructures/RMQ.cpp | 27 -- datastructures/bitset.cpp | 7 - datastructures/datastructures.tex | 136 ------- datastructures/dynamicConvexHull.cpp | 36 -- datastructures/fenwickTree.cpp | 15 - datastructures/fenwickTree2.cpp | 21 - datastructures/firstUnused.cpp | 13 - datastructures/lazyPropagation.cpp | 83 ---- datastructures/lichao.cpp | 46 --- datastructures/monotonicConvexHull.cpp | 27 -- datastructures/pbds.cpp | 18 - datastructures/persistent.cpp | 18 - datastructures/persistentArray.cpp | 24 -- datastructures/segmentTree.cpp | 42 -- datastructures/sparseTable.cpp | 23 -- datastructures/sparseTableDisjoint.cpp | 26 -- datastructures/stlHashMap.cpp | 17 - datastructures/stlPQ.cpp | 15 - datastructures/stlPriorityQueue.cpp | 8 - datastructures/stlRope.cpp | 8 - datastructures/stlTree.cpp | 13 - datastructures/treap.cpp | 79 ---- datastructures/treap2.cpp | 79 ---- datastructures/unionFind.cpp | 26 -- datastructures/unionFind2.cpp | 25 -- datastructures/waveletTree.cpp | 40 -- geometry/antipodalPoints.cpp | 12 - geometry/circle.cpp | 33 -- geometry/closestPair.cpp | 38 -- geometry/convexHull.cpp | 19 - geometry/delaunay.cpp | 124 ------ geometry/formulars.cpp | 42 -- geometry/formulars3d.cpp | 53 --- geometry/geometry.tex | 62 --- geometry/hpi.cpp | 68 ---- geometry/lines.cpp | 33 -- geometry/linesAndSegments.cpp | 89 ---- geometry/polygon.cpp | 150 ------- geometry/segmentIntersection.cpp | 63 --- geometry/sortAround.cpp | 10 - geometry/spheres.cpp | 29 -- geometry/triangle.cpp | 43 -- geometry/triangle.tex | 41 -- graph/2sat.cpp | 31 -- graph/LCA.cpp | 24 -- graph/LCA_sparse.cpp | 32 -- graph/TSP.cpp | 28 -- graph/articulationPoints.cpp | 45 --- graph/bellmannFord.cpp | 17 - graph/bitonicTSP.cpp | 31 -- graph/bitonicTSPsimple.cpp | 28 -- graph/blossom.cpp | 82 ---- graph/bronKerbosch.cpp | 24 -- graph/capacityScaling.cpp | 44 -- graph/centroid.cpp | 21 - graph/connect.cpp | 31 -- graph/cycleCounting.cpp | 64 --- graph/dfs.tex | 16 - graph/dijkstra.cpp | 21 - graph/dinicScaling.cpp | 51 --- graph/euler.cpp | 23 -- graph/floydWarshall.cpp | 26 -- graph/graph.tex | 276 ------------- graph/havelHakimi.cpp | 18 - graph/hld.cpp | 44 -- graph/hopcroftKarp.cpp | 47 --- graph/kruskal.cpp | 9 - graph/matching.cpp | 23 -- graph/maxCarBiMatch.cpp | 25 -- graph/maxWeightBipartiteMatching.cpp | 50 --- graph/minCostMaxFlow.cpp | 66 --- graph/pushRelabel.cpp | 64 --- graph/reroot.cpp | 62 --- graph/scc.cpp | 32 -- graph/stoerWagner.cpp | 53 --- graph/treeIsomorphism.cpp | 15 - graph/virtualTree.cpp | 22 - latexHeaders/code.sty | 125 ------ latexHeaders/commands.sty | 56 --- latexHeaders/layout.sty | 82 ---- latexHeaders/math.sty | 98 ----- math/berlekampMassey.cpp | 31 -- math/bigint.cpp | 275 ------------- math/binomial0.cpp | 14 - math/binomial1.cpp | 8 - math/binomial2.cpp | 32 -- math/binomial3.cpp | 10 - math/chineseRemainder.cpp | 14 - math/cycleDetection.cpp | 16 - math/discreteLogarithm.cpp | 14 - math/discreteNthRoot.cpp | 5 - math/divisors.cpp | 11 - math/extendedEuclid.cpp | 6 - math/gauss.cpp | 36 -- 
math/gcd-lcm.cpp | 2 - math/goldenSectionSearch.cpp | 15 - math/inversions.cpp | 9 - math/inversionsMerge.cpp | 27 -- math/kthperm.cpp | 14 - math/legendre.cpp | 4 - math/lgsFp.cpp | 26 -- math/linearCongruence.cpp | 5 - math/linearRecurence.cpp | 33 -- math/linearSieve.cpp | 49 --- math/longestIncreasingSubsequence.cpp | 17 - math/math.tex | 535 ------------------------- math/matrixPower.cpp | 16 - math/millerRabin.cpp | 19 - math/mobius.cpp | 21 - math/modExp.cpp | 6 - math/modMulIterativ.cpp | 9 - math/modPowIterativ.cpp | 9 - math/modSqrt.cpp | 23 -- math/multInv.cpp | 4 - math/permIndex.cpp | 13 - math/phi.cpp | 21 - math/piLegendre.cpp | 23 -- math/piLehmer.cpp | 52 --- math/polynomial.cpp | 65 --- math/primeSieve.cpp | 16 - math/primitiveRoot.cpp | 23 -- math/rho.cpp | 19 - math/shortModInv.cpp | 3 - math/simpson.cpp | 12 - math/sqrtModCipolla.cpp | 13 - math/squfof.cpp | 89 ---- math/tables.tex | 18 - math/tables/binom.tex | 28 -- math/tables/composite.tex | 27 -- math/tables/nim.tex | 96 ----- math/tables/numbers.tex | 59 --- math/tables/platonic.tex | 39 -- math/tables/probability.tex | 27 -- math/tables/series.tex | 33 -- math/tables/stuff.tex | 32 -- math/tables/twelvefold.tex | 32 -- math/transforms/andTransform.cpp | 8 - math/transforms/bitwiseTransforms.cpp | 12 - math/transforms/fft.cpp | 23 -- math/transforms/fftMul.cpp | 14 - math/transforms/multiplyBitwise.cpp | 8 - math/transforms/multiplyFFT.cpp | 12 - math/transforms/multiplyNTT.cpp | 8 - math/transforms/ntt.cpp | 23 -- math/transforms/orTransform.cpp | 8 - math/transforms/seriesOperations.cpp | 56 --- math/transforms/xorTransform.cpp | 10 - other/bitOps.cpp | 18 - other/compiletime.cpp | 7 - other/divideAndConquer.cpp | 27 -- other/fastIO.cpp | 24 -- other/josephus2.cpp | 8 - other/josephusK.cpp | 5 - other/knuth.cpp | 15 - other/other.tex | 312 -------------- other/pbs.cpp | 19 - other/pragmas.cpp | 6 - other/sos.cpp | 6 - other/split.cpp | 10 - other/stress.sh | 7 - other/stuff.cpp | 29 -- other/timed.cpp | 3 - python/io.py | 3 - python/python.tex | 10 - python/recursion.py | 2 - string/ahoCorasick.cpp | 52 --- string/deBruijn.cpp | 7 - string/duval.cpp | 21 - string/kmp.cpp | 20 - string/longestCommonSubsequence.cpp | 15 - string/lyndon.cpp | 11 - string/manacher.cpp | 20 - string/rollingHash.cpp | 16 - string/rollingHash2.cpp | 18 - string/rollingHashCf.cpp | 17 - string/string.tex | 132 ------ string/suffixArray.cpp | 38 -- string/suffixAutomaton.cpp | 59 --- string/suffixTree.cpp | 72 ---- string/trie.cpp | 33 -- string/z.cpp | 10 - tcr.pdf | Bin 667098 -> 690769 bytes tcr.tex | 65 --- template/console.cpp | 2 - template/template.cpp | 19 - template/template.tex | 9 - test/datastructures/bitset.cpp | 6 + test/datastructures/fenwickTree.cpp | 58 +++ test/datastructures/fenwickTree2.cpp | 60 +++ test/datastructures/lazyPropagation.cpp | 61 +++ test/datastructures/pbds.cpp | 11 + test/datastructures/segmentTree.cpp | 122 ++++++ test/datastructures/sparseTable.cpp | 51 +++ test/datastructures/sparseTableDisjoint.cpp | 48 +++ test/datastructures/stlHashMap.cpp | 4 + test/datastructures/stlTree.cpp | 2 + test/datastructures/unionFind.cpp | 109 +++++ test/datastructures/waveletTree.cpp | 75 ++++ test/geometry.h | 140 +++++++ test/geometry/antipodalPoints.cpp | 70 ++++ test/geometry/circle.cpp | 116 ++++++ test/geometry/closestPair.cpp | 69 ++++ test/geometry/closestPair.double.cpp | 66 +++ test/geometry/convexHull.cpp | 79 ++++ test/geometry/delaunay.cpp | 144 +++++++ test/geometry/formulas.cpp | 127 ++++++ 
test/geometry/linesAndSegments.cpp | 240 +++++++++++ test/geometry/polygon.cpp | 296 ++++++++++++++ test/geometry/segmentIntersection.cpp | 88 ++++ test/geometry/sortAround.cpp | 83 ++++ test/geometry/triangle.cpp | 146 +++++++ test/graph/2sat.cpp | 133 ++++++ test/graph/LCA_sparse.cpp | 63 +++ test/graph/TSP.cpp | 67 ++++ test/graph/articulationPoints.bcc.cpp | 78 ++++ test/graph/articulationPoints.bridges.cpp | 64 +++ test/graph/articulationPoints.cpp | 85 ++++ test/graph/bellmannFord.cpp | 70 ++++ test/graph/bitonicTSP.cpp | 49 +++ test/graph/bitonicTSPsimple.cpp | 49 +++ test/graph/blossom.cpp | 76 ++++ test/graph/bronKerbosch.cpp | 73 ++++ test/graph/centroid.cpp | 77 ++++ test/graph/cycleCounting.cpp | 79 ++++ test/graph/dijkstra.cpp | 64 +++ test/graph/dinicScaling.cpp | 61 +++ test/graph/euler.cpp | 87 ++++ test/graph/floydWarshall.cpp | 90 +++++ test/graph/havelHakimi.cpp | 65 +++ test/graph/hopcroftKarp.cpp | 74 ++++ test/graph/kruskal.cpp | 91 +++++ test/graph/matching.cpp | 62 +++ test/graph/maxCarBiMatch.cpp | 74 ++++ test/graph/maxWeightBipartiteMatching.cpp | 59 +++ test/graph/minCostMaxFlow.cpp | 68 ++++ test/graph/pushRelabel.cpp | 61 +++ test/graph/scc.cpp | 92 +++++ test/graph/stoerWagner.cpp | 81 ++++ test/graph/treeIsomorphism.cpp | 126 ++++++ test/math/berlekampMassey.cpp | 68 ++++ test/math/bigint.cpp | 122 ++++++ test/math/binomial0.cpp | 31 ++ test/math/binomial1.cpp | 27 ++ test/math/binomial2.cpp | 29 ++ test/math/binomial3.cpp | 31 ++ test/math/chineseRemainder.cpp | 47 +++ test/math/cycleDetection.cpp | 47 +++ test/math/discreteLogarithm.cpp | 64 +++ test/math/discreteNthRoot.cpp | 78 ++++ test/math/divisors.cpp | 65 +++ test/math/extendedEuclid.cpp | 41 ++ test/math/gauss.cpp | 118 ++++++ test/math/gcd-lcm.cpp | 46 +++ test/math/goldenSectionSearch.cpp | 74 ++++ test/math/inversions.cpp | 43 ++ test/math/inversionsMerge.cpp | 46 +++ test/math/kthperm.cpp | 38 ++ test/math/kthperm_permIndex.cpp | 21 + test/math/legendre.cpp | 43 ++ test/math/lgsFp.cpp | 118 ++++++ test/math/linearCongruence.cpp | 53 +++ test/math/linearRecurence.cpp | 54 +++ test/math/linearSieve.cpp | 71 ++++ test/math/longestIncreasingSubsequence.cpp | 76 ++++ test/math/matrixPower.cpp | 116 ++++++ test/math/millerRabin.base32.cpp | 137 +++++++ test/math/millerRabin.cpp | 129 ++++++ test/math/modExp.cpp | 42 ++ test/math/modMulIterativ.cpp | 57 +++ test/math/modPowIterativ.cpp | 42 ++ test/math/multInv.cpp | 40 ++ test/math/permIndex.cpp | 39 ++ test/math/piLegendre.cpp | 40 ++ test/math/piLehmer.cpp | 42 ++ test/math/primeSieve.cpp | 47 +++ test/math/primitiveRoot.cpp | 82 ++++ test/math/rho.cpp | 117 ++++++ test/math/shortModInv.cpp | 39 ++ test/math/simpson.cpp | 63 +++ test/math/sqrtModCipolla.cpp | 48 +++ test/math/transforms/andTransform.cpp | 38 ++ test/math/transforms/bitwiseTransforms.cpp | 38 ++ test/math/transforms/fft.cpp | 51 +++ test/math/transforms/fftMul.cpp | 62 +++ test/math/transforms/multiplyBitwise.cpp | 55 +++ test/math/transforms/multiplyFFT.cpp | 55 +++ test/math/transforms/multiplyNTT.cpp | 56 +++ test/math/transforms/ntt.cpp | 39 ++ test/math/transforms/orTransform.cpp | 38 ++ test/math/transforms/xorTransform.cpp | 38 ++ test/other/compiletime.cpp | 2 + test/other/divideAndConquer.cpp | 103 +++++ test/other/fastIO.cpp | 32 ++ test/other/fastIO.in | 2 + test/other/josephus2.cpp | 42 ++ test/other/josephusK.cpp | 43 ++ test/other/knuth.cpp | 103 +++++ test/other/sos.cpp | 50 +++ test/other/split.cpp | 24 ++ test/string/ahoCorasick.cpp | 76 ++++ 
test/string/deBruijn.cpp | 43 ++ test/string/duval.cpp | 85 ++++ test/string/kmp.cpp | 85 ++++ test/string/longestCommonSubsequence.cpp | 55 +++ test/string/lyndon.cpp | 61 +++ test/string/manacher.cpp | 49 +++ test/string/rollingHash.cpp | 92 +++++ test/string/rollingHashCf.cpp | 94 +++++ test/string/suffixArray.cpp | 61 +++ test/string/suffixAutomaton.cpp | 62 +++ test/string/suffixTree.cpp | 50 +++ test/string/trie.cpp | 58 +++ test/string/z.cpp | 41 ++ test/template/template.cpp | 1 + test/test.sh | 70 ++++ test/util.h | 411 +++++++++++++++++++ tests/gcc5bug.cpp | 4 - tests/precision.cpp | 8 - tests/test.tex | 43 -- tests/whitespace.cpp | 1 - 513 files changed, 16354 insertions(+), 7208 deletions(-) create mode 100644 .github/workflows/list_missing.yml create mode 100644 .github/workflows/test_all.yml create mode 100644 .github/workflows/test_datastructures.yml create mode 100644 .github/workflows/test_geometry.yml create mode 100644 .github/workflows/test_graph.yml create mode 100644 .github/workflows/test_math.yml create mode 100644 .github/workflows/test_other.yml create mode 100644 .github/workflows/test_pdf.yml create mode 100644 .github/workflows/test_string.yml create mode 100644 .github/workflows/test_template.yml create mode 100644 content/datastructures/LCT.cpp create mode 100644 content/datastructures/bitset.cpp create mode 100644 content/datastructures/datastructures.tex create mode 100644 content/datastructures/dynamicConvexHull.cpp create mode 100644 content/datastructures/fenwickTree.cpp create mode 100644 content/datastructures/fenwickTree2.cpp create mode 100644 content/datastructures/lazyPropagation.cpp create mode 100644 content/datastructures/lichao.cpp create mode 100644 content/datastructures/monotonicConvexHull.cpp create mode 100644 content/datastructures/pbds.cpp create mode 100644 content/datastructures/persistent.cpp create mode 100644 content/datastructures/persistentArray.cpp create mode 100644 content/datastructures/segmentTree.cpp create mode 100644 content/datastructures/sparseTable.cpp create mode 100644 content/datastructures/sparseTableDisjoint.cpp create mode 100644 content/datastructures/stlHashMap.cpp create mode 100644 content/datastructures/stlPriorityQueue.cpp create mode 100644 content/datastructures/stlRope.cpp create mode 100644 content/datastructures/stlTree.cpp create mode 100644 content/datastructures/treap.cpp create mode 100644 content/datastructures/treap2.cpp create mode 100644 content/datastructures/unionFind.cpp create mode 100644 content/datastructures/waveletTree.cpp create mode 100644 content/geometry/antipodalPoints.cpp create mode 100644 content/geometry/circle.cpp create mode 100644 content/geometry/closestPair.cpp create mode 100644 content/geometry/convexHull.cpp create mode 100644 content/geometry/delaunay.cpp create mode 100644 content/geometry/formulas.cpp create mode 100644 content/geometry/formulas3d.cpp create mode 100644 content/geometry/geometry.tex create mode 100644 content/geometry/hpi.cpp create mode 100644 content/geometry/lines.cpp create mode 100644 content/geometry/linesAndSegments.cpp create mode 100644 content/geometry/polygon.cpp create mode 100644 content/geometry/segmentIntersection.cpp create mode 100644 content/geometry/sortAround.cpp create mode 100644 content/geometry/spheres.cpp create mode 100644 content/geometry/triangle.cpp create mode 100644 content/geometry/triangle.tex create mode 100644 content/graph/2sat.cpp create mode 100644 content/graph/LCA_sparse.cpp create mode 100644 
content/graph/TSP.cpp create mode 100644 content/graph/articulationPoints.cpp create mode 100644 content/graph/bellmannFord.cpp create mode 100644 content/graph/bitonicTSP.cpp create mode 100644 content/graph/bitonicTSPsimple.cpp create mode 100644 content/graph/blossom.cpp create mode 100644 content/graph/bronKerbosch.cpp create mode 100644 content/graph/centroid.cpp create mode 100644 content/graph/connect.cpp create mode 100644 content/graph/cycleCounting.cpp create mode 100644 content/graph/dfs.tex create mode 100644 content/graph/dijkstra.cpp create mode 100644 content/graph/dinicScaling.cpp create mode 100644 content/graph/euler.cpp create mode 100644 content/graph/floydWarshall.cpp create mode 100644 content/graph/graph.tex create mode 100644 content/graph/havelHakimi.cpp create mode 100644 content/graph/hld.cpp create mode 100644 content/graph/hopcroftKarp.cpp create mode 100644 content/graph/kruskal.cpp create mode 100644 content/graph/matching.cpp create mode 100644 content/graph/maxCarBiMatch.cpp create mode 100644 content/graph/maxWeightBipartiteMatching.cpp create mode 100644 content/graph/minCostMaxFlow.cpp create mode 100644 content/graph/pushRelabel.cpp create mode 100644 content/graph/reroot.cpp create mode 100644 content/graph/scc.cpp create mode 100644 content/graph/stoerWagner.cpp create mode 100644 content/graph/treeIsomorphism.cpp create mode 100644 content/graph/virtualTree.cpp create mode 100644 content/latexHeaders/code.sty create mode 100644 content/latexHeaders/commands.sty create mode 100644 content/latexHeaders/layout.sty create mode 100644 content/latexHeaders/math.sty create mode 100644 content/math/berlekampMassey.cpp create mode 100644 content/math/bigint.cpp create mode 100644 content/math/binomial0.cpp create mode 100644 content/math/binomial1.cpp create mode 100644 content/math/binomial2.cpp create mode 100644 content/math/binomial3.cpp create mode 100644 content/math/chineseRemainder.cpp create mode 100644 content/math/cycleDetection.cpp create mode 100644 content/math/discreteLogarithm.cpp create mode 100644 content/math/discreteNthRoot.cpp create mode 100644 content/math/divisors.cpp create mode 100644 content/math/extendedEuclid.cpp create mode 100644 content/math/gauss.cpp create mode 100644 content/math/gcd-lcm.cpp create mode 100644 content/math/goldenSectionSearch.cpp create mode 100644 content/math/inversions.cpp create mode 100644 content/math/inversionsMerge.cpp create mode 100644 content/math/kthperm.cpp create mode 100644 content/math/legendre.cpp create mode 100644 content/math/lgsFp.cpp create mode 100644 content/math/linearCongruence.cpp create mode 100644 content/math/linearRecurence.cpp create mode 100644 content/math/linearSieve.cpp create mode 100644 content/math/longestIncreasingSubsequence.cpp create mode 100644 content/math/math.tex create mode 100644 content/math/matrixPower.cpp create mode 100644 content/math/millerRabin.cpp create mode 100644 content/math/modExp.cpp create mode 100644 content/math/modMulIterativ.cpp create mode 100644 content/math/modPowIterativ.cpp create mode 100644 content/math/multInv.cpp create mode 100644 content/math/permIndex.cpp create mode 100644 content/math/piLegendre.cpp create mode 100644 content/math/piLehmer.cpp create mode 100644 content/math/polynomial.cpp create mode 100644 content/math/primeSieve.cpp create mode 100644 content/math/primitiveRoot.cpp create mode 100644 content/math/rho.cpp create mode 100644 content/math/shortModInv.cpp create mode 100644 content/math/simpson.cpp create mode 
100644 content/math/sqrtModCipolla.cpp create mode 100644 content/math/squfof.cpp create mode 100644 content/math/tables.tex create mode 100644 content/math/tables/binom.tex create mode 100644 content/math/tables/composite.tex create mode 100644 content/math/tables/nim.tex create mode 100644 content/math/tables/numbers.tex create mode 100644 content/math/tables/platonic.tex create mode 100644 content/math/tables/probability.tex create mode 100644 content/math/tables/series.tex create mode 100644 content/math/tables/stuff.tex create mode 100644 content/math/tables/twelvefold.tex create mode 100644 content/math/transforms/andTransform.cpp create mode 100644 content/math/transforms/bitwiseTransforms.cpp create mode 100644 content/math/transforms/fft.cpp create mode 100644 content/math/transforms/fftMul.cpp create mode 100644 content/math/transforms/multiplyBitwise.cpp create mode 100644 content/math/transforms/multiplyFFT.cpp create mode 100644 content/math/transforms/multiplyNTT.cpp create mode 100644 content/math/transforms/ntt.cpp create mode 100644 content/math/transforms/orTransform.cpp create mode 100644 content/math/transforms/seriesOperations.cpp create mode 100644 content/math/transforms/xorTransform.cpp create mode 100644 content/other/bitOps.cpp create mode 100644 content/other/compiletime.cpp create mode 100644 content/other/divideAndConquer.cpp create mode 100644 content/other/fastIO.cpp create mode 100644 content/other/josephus2.cpp create mode 100644 content/other/josephusK.cpp create mode 100644 content/other/knuth.cpp create mode 100644 content/other/other.tex create mode 100644 content/other/pbs.cpp create mode 100644 content/other/pragmas.cpp create mode 100644 content/other/sos.cpp create mode 100644 content/other/split.cpp create mode 100644 content/other/stress.sh create mode 100644 content/other/stuff.cpp create mode 100644 content/other/timed.cpp create mode 100644 content/python/io.py create mode 100644 content/python/python.tex create mode 100644 content/python/recursion.py create mode 100644 content/string/ahoCorasick.cpp create mode 100644 content/string/deBruijn.cpp create mode 100644 content/string/duval.cpp create mode 100644 content/string/kmp.cpp create mode 100644 content/string/longestCommonSubsequence.cpp create mode 100644 content/string/lyndon.cpp create mode 100644 content/string/manacher.cpp create mode 100644 content/string/rollingHash.cpp create mode 100644 content/string/rollingHashCf.cpp create mode 100644 content/string/string.tex create mode 100644 content/string/suffixArray.cpp create mode 100644 content/string/suffixAutomaton.cpp create mode 100644 content/string/suffixTree.cpp create mode 100644 content/string/trie.cpp create mode 100644 content/string/z.cpp create mode 100644 content/tcr.tex create mode 100644 content/template/console.sh create mode 100644 content/template/template.cpp create mode 100644 content/template/template.tex create mode 100644 content/tests/gcc5bug.cpp create mode 100644 content/tests/precision.cpp create mode 100644 content/tests/test.tex create mode 100644 content/tests/whitespace.cpp delete mode 100644 datastructures/LCT.cpp delete mode 100644 datastructures/RMQ.cpp delete mode 100644 datastructures/bitset.cpp delete mode 100644 datastructures/datastructures.tex delete mode 100644 datastructures/dynamicConvexHull.cpp delete mode 100644 datastructures/fenwickTree.cpp delete mode 100644 datastructures/fenwickTree2.cpp delete mode 100644 datastructures/firstUnused.cpp delete mode 100644 
datastructures/lazyPropagation.cpp delete mode 100644 datastructures/lichao.cpp delete mode 100644 datastructures/monotonicConvexHull.cpp delete mode 100644 datastructures/pbds.cpp delete mode 100644 datastructures/persistent.cpp delete mode 100644 datastructures/persistentArray.cpp delete mode 100644 datastructures/segmentTree.cpp delete mode 100644 datastructures/sparseTable.cpp delete mode 100644 datastructures/sparseTableDisjoint.cpp delete mode 100644 datastructures/stlHashMap.cpp delete mode 100644 datastructures/stlPQ.cpp delete mode 100644 datastructures/stlPriorityQueue.cpp delete mode 100644 datastructures/stlRope.cpp delete mode 100644 datastructures/stlTree.cpp delete mode 100644 datastructures/treap.cpp delete mode 100644 datastructures/treap2.cpp delete mode 100644 datastructures/unionFind.cpp delete mode 100644 datastructures/unionFind2.cpp delete mode 100644 datastructures/waveletTree.cpp delete mode 100644 geometry/antipodalPoints.cpp delete mode 100644 geometry/circle.cpp delete mode 100644 geometry/closestPair.cpp delete mode 100644 geometry/convexHull.cpp delete mode 100644 geometry/delaunay.cpp delete mode 100644 geometry/formulars.cpp delete mode 100644 geometry/formulars3d.cpp delete mode 100644 geometry/geometry.tex delete mode 100644 geometry/hpi.cpp delete mode 100644 geometry/lines.cpp delete mode 100644 geometry/linesAndSegments.cpp delete mode 100644 geometry/polygon.cpp delete mode 100644 geometry/segmentIntersection.cpp delete mode 100644 geometry/sortAround.cpp delete mode 100644 geometry/spheres.cpp delete mode 100644 geometry/triangle.cpp delete mode 100644 geometry/triangle.tex delete mode 100644 graph/2sat.cpp delete mode 100644 graph/LCA.cpp delete mode 100644 graph/LCA_sparse.cpp delete mode 100644 graph/TSP.cpp delete mode 100644 graph/articulationPoints.cpp delete mode 100644 graph/bellmannFord.cpp delete mode 100644 graph/bitonicTSP.cpp delete mode 100644 graph/bitonicTSPsimple.cpp delete mode 100644 graph/blossom.cpp delete mode 100644 graph/bronKerbosch.cpp delete mode 100644 graph/capacityScaling.cpp delete mode 100644 graph/centroid.cpp delete mode 100644 graph/connect.cpp delete mode 100644 graph/cycleCounting.cpp delete mode 100644 graph/dfs.tex delete mode 100644 graph/dijkstra.cpp delete mode 100644 graph/dinicScaling.cpp delete mode 100644 graph/euler.cpp delete mode 100644 graph/floydWarshall.cpp delete mode 100644 graph/graph.tex delete mode 100644 graph/havelHakimi.cpp delete mode 100644 graph/hld.cpp delete mode 100644 graph/hopcroftKarp.cpp delete mode 100644 graph/kruskal.cpp delete mode 100644 graph/matching.cpp delete mode 100644 graph/maxCarBiMatch.cpp delete mode 100644 graph/maxWeightBipartiteMatching.cpp delete mode 100644 graph/minCostMaxFlow.cpp delete mode 100644 graph/pushRelabel.cpp delete mode 100644 graph/reroot.cpp delete mode 100644 graph/scc.cpp delete mode 100644 graph/stoerWagner.cpp delete mode 100644 graph/treeIsomorphism.cpp delete mode 100644 graph/virtualTree.cpp delete mode 100644 latexHeaders/code.sty delete mode 100644 latexHeaders/commands.sty delete mode 100644 latexHeaders/layout.sty delete mode 100644 latexHeaders/math.sty delete mode 100644 math/berlekampMassey.cpp delete mode 100644 math/bigint.cpp delete mode 100644 math/binomial0.cpp delete mode 100644 math/binomial1.cpp delete mode 100644 math/binomial2.cpp delete mode 100644 math/binomial3.cpp delete mode 100644 math/chineseRemainder.cpp delete mode 100644 math/cycleDetection.cpp delete mode 100644 math/discreteLogarithm.cpp delete mode 100644 
math/discreteNthRoot.cpp delete mode 100644 math/divisors.cpp delete mode 100644 math/extendedEuclid.cpp delete mode 100644 math/gauss.cpp delete mode 100644 math/gcd-lcm.cpp delete mode 100644 math/goldenSectionSearch.cpp delete mode 100644 math/inversions.cpp delete mode 100644 math/inversionsMerge.cpp delete mode 100644 math/kthperm.cpp delete mode 100644 math/legendre.cpp delete mode 100644 math/lgsFp.cpp delete mode 100644 math/linearCongruence.cpp delete mode 100644 math/linearRecurence.cpp delete mode 100644 math/linearSieve.cpp delete mode 100644 math/longestIncreasingSubsequence.cpp delete mode 100644 math/math.tex delete mode 100644 math/matrixPower.cpp delete mode 100644 math/millerRabin.cpp delete mode 100644 math/mobius.cpp delete mode 100644 math/modExp.cpp delete mode 100644 math/modMulIterativ.cpp delete mode 100644 math/modPowIterativ.cpp delete mode 100644 math/modSqrt.cpp delete mode 100644 math/multInv.cpp delete mode 100644 math/permIndex.cpp delete mode 100644 math/phi.cpp delete mode 100644 math/piLegendre.cpp delete mode 100644 math/piLehmer.cpp delete mode 100644 math/polynomial.cpp delete mode 100644 math/primeSieve.cpp delete mode 100644 math/primitiveRoot.cpp delete mode 100644 math/rho.cpp delete mode 100644 math/shortModInv.cpp delete mode 100644 math/simpson.cpp delete mode 100644 math/sqrtModCipolla.cpp delete mode 100644 math/squfof.cpp delete mode 100644 math/tables.tex delete mode 100644 math/tables/binom.tex delete mode 100644 math/tables/composite.tex delete mode 100644 math/tables/nim.tex delete mode 100644 math/tables/numbers.tex delete mode 100644 math/tables/platonic.tex delete mode 100644 math/tables/probability.tex delete mode 100644 math/tables/series.tex delete mode 100644 math/tables/stuff.tex delete mode 100644 math/tables/twelvefold.tex delete mode 100644 math/transforms/andTransform.cpp delete mode 100644 math/transforms/bitwiseTransforms.cpp delete mode 100644 math/transforms/fft.cpp delete mode 100644 math/transforms/fftMul.cpp delete mode 100644 math/transforms/multiplyBitwise.cpp delete mode 100644 math/transforms/multiplyFFT.cpp delete mode 100644 math/transforms/multiplyNTT.cpp delete mode 100644 math/transforms/ntt.cpp delete mode 100644 math/transforms/orTransform.cpp delete mode 100644 math/transforms/seriesOperations.cpp delete mode 100644 math/transforms/xorTransform.cpp delete mode 100644 other/bitOps.cpp delete mode 100644 other/compiletime.cpp delete mode 100644 other/divideAndConquer.cpp delete mode 100644 other/fastIO.cpp delete mode 100644 other/josephus2.cpp delete mode 100644 other/josephusK.cpp delete mode 100644 other/knuth.cpp delete mode 100644 other/other.tex delete mode 100644 other/pbs.cpp delete mode 100644 other/pragmas.cpp delete mode 100644 other/sos.cpp delete mode 100644 other/split.cpp delete mode 100644 other/stress.sh delete mode 100644 other/stuff.cpp delete mode 100644 other/timed.cpp delete mode 100644 python/io.py delete mode 100644 python/python.tex delete mode 100644 python/recursion.py delete mode 100644 string/ahoCorasick.cpp delete mode 100644 string/deBruijn.cpp delete mode 100644 string/duval.cpp delete mode 100644 string/kmp.cpp delete mode 100644 string/longestCommonSubsequence.cpp delete mode 100644 string/lyndon.cpp delete mode 100644 string/manacher.cpp delete mode 100644 string/rollingHash.cpp delete mode 100644 string/rollingHash2.cpp delete mode 100644 string/rollingHashCf.cpp delete mode 100644 string/string.tex delete mode 100644 string/suffixArray.cpp delete mode 100644 
string/suffixAutomaton.cpp delete mode 100644 string/suffixTree.cpp delete mode 100644 string/trie.cpp delete mode 100644 string/z.cpp delete mode 100644 tcr.tex delete mode 100644 template/console.cpp delete mode 100644 template/template.cpp delete mode 100644 template/template.tex create mode 100644 test/datastructures/bitset.cpp create mode 100644 test/datastructures/fenwickTree.cpp create mode 100644 test/datastructures/fenwickTree2.cpp create mode 100644 test/datastructures/lazyPropagation.cpp create mode 100644 test/datastructures/pbds.cpp create mode 100644 test/datastructures/segmentTree.cpp create mode 100644 test/datastructures/sparseTable.cpp create mode 100644 test/datastructures/sparseTableDisjoint.cpp create mode 100644 test/datastructures/stlHashMap.cpp create mode 100644 test/datastructures/stlTree.cpp create mode 100644 test/datastructures/unionFind.cpp create mode 100644 test/datastructures/waveletTree.cpp create mode 100644 test/geometry.h create mode 100644 test/geometry/antipodalPoints.cpp create mode 100644 test/geometry/circle.cpp create mode 100644 test/geometry/closestPair.cpp create mode 100644 test/geometry/closestPair.double.cpp create mode 100644 test/geometry/convexHull.cpp create mode 100644 test/geometry/delaunay.cpp create mode 100644 test/geometry/formulas.cpp create mode 100644 test/geometry/linesAndSegments.cpp create mode 100644 test/geometry/polygon.cpp create mode 100644 test/geometry/segmentIntersection.cpp create mode 100644 test/geometry/sortAround.cpp create mode 100644 test/geometry/triangle.cpp create mode 100644 test/graph/2sat.cpp create mode 100644 test/graph/LCA_sparse.cpp create mode 100644 test/graph/TSP.cpp create mode 100644 test/graph/articulationPoints.bcc.cpp create mode 100644 test/graph/articulationPoints.bridges.cpp create mode 100644 test/graph/articulationPoints.cpp create mode 100644 test/graph/bellmannFord.cpp create mode 100644 test/graph/bitonicTSP.cpp create mode 100644 test/graph/bitonicTSPsimple.cpp create mode 100644 test/graph/blossom.cpp create mode 100644 test/graph/bronKerbosch.cpp create mode 100644 test/graph/centroid.cpp create mode 100644 test/graph/cycleCounting.cpp create mode 100644 test/graph/dijkstra.cpp create mode 100644 test/graph/dinicScaling.cpp create mode 100644 test/graph/euler.cpp create mode 100644 test/graph/floydWarshall.cpp create mode 100644 test/graph/havelHakimi.cpp create mode 100644 test/graph/hopcroftKarp.cpp create mode 100644 test/graph/kruskal.cpp create mode 100644 test/graph/matching.cpp create mode 100644 test/graph/maxCarBiMatch.cpp create mode 100644 test/graph/maxWeightBipartiteMatching.cpp create mode 100644 test/graph/minCostMaxFlow.cpp create mode 100644 test/graph/pushRelabel.cpp create mode 100644 test/graph/scc.cpp create mode 100644 test/graph/stoerWagner.cpp create mode 100644 test/graph/treeIsomorphism.cpp create mode 100644 test/math/berlekampMassey.cpp create mode 100644 test/math/bigint.cpp create mode 100644 test/math/binomial0.cpp create mode 100644 test/math/binomial1.cpp create mode 100644 test/math/binomial2.cpp create mode 100644 test/math/binomial3.cpp create mode 100644 test/math/chineseRemainder.cpp create mode 100644 test/math/cycleDetection.cpp create mode 100644 test/math/discreteLogarithm.cpp create mode 100644 test/math/discreteNthRoot.cpp create mode 100644 test/math/divisors.cpp create mode 100644 test/math/extendedEuclid.cpp create mode 100644 test/math/gauss.cpp create mode 100644 test/math/gcd-lcm.cpp create mode 100644 
test/math/goldenSectionSearch.cpp create mode 100644 test/math/inversions.cpp create mode 100644 test/math/inversionsMerge.cpp create mode 100644 test/math/kthperm.cpp create mode 100644 test/math/kthperm_permIndex.cpp create mode 100644 test/math/legendre.cpp create mode 100644 test/math/lgsFp.cpp create mode 100644 test/math/linearCongruence.cpp create mode 100644 test/math/linearRecurence.cpp create mode 100644 test/math/linearSieve.cpp create mode 100644 test/math/longestIncreasingSubsequence.cpp create mode 100644 test/math/matrixPower.cpp create mode 100644 test/math/millerRabin.base32.cpp create mode 100644 test/math/millerRabin.cpp create mode 100644 test/math/modExp.cpp create mode 100644 test/math/modMulIterativ.cpp create mode 100644 test/math/modPowIterativ.cpp create mode 100644 test/math/multInv.cpp create mode 100644 test/math/permIndex.cpp create mode 100644 test/math/piLegendre.cpp create mode 100644 test/math/piLehmer.cpp create mode 100644 test/math/primeSieve.cpp create mode 100644 test/math/primitiveRoot.cpp create mode 100644 test/math/rho.cpp create mode 100644 test/math/shortModInv.cpp create mode 100644 test/math/simpson.cpp create mode 100644 test/math/sqrtModCipolla.cpp create mode 100644 test/math/transforms/andTransform.cpp create mode 100644 test/math/transforms/bitwiseTransforms.cpp create mode 100644 test/math/transforms/fft.cpp create mode 100644 test/math/transforms/fftMul.cpp create mode 100644 test/math/transforms/multiplyBitwise.cpp create mode 100644 test/math/transforms/multiplyFFT.cpp create mode 100644 test/math/transforms/multiplyNTT.cpp create mode 100644 test/math/transforms/ntt.cpp create mode 100644 test/math/transforms/orTransform.cpp create mode 100644 test/math/transforms/xorTransform.cpp create mode 100644 test/other/compiletime.cpp create mode 100644 test/other/divideAndConquer.cpp create mode 100644 test/other/fastIO.cpp create mode 100644 test/other/fastIO.in create mode 100644 test/other/josephus2.cpp create mode 100644 test/other/josephusK.cpp create mode 100644 test/other/knuth.cpp create mode 100644 test/other/sos.cpp create mode 100644 test/other/split.cpp create mode 100644 test/string/ahoCorasick.cpp create mode 100644 test/string/deBruijn.cpp create mode 100644 test/string/duval.cpp create mode 100644 test/string/kmp.cpp create mode 100644 test/string/longestCommonSubsequence.cpp create mode 100644 test/string/lyndon.cpp create mode 100644 test/string/manacher.cpp create mode 100644 test/string/rollingHash.cpp create mode 100644 test/string/rollingHashCf.cpp create mode 100644 test/string/suffixArray.cpp create mode 100644 test/string/suffixAutomaton.cpp create mode 100644 test/string/suffixTree.cpp create mode 100644 test/string/trie.cpp create mode 100644 test/string/z.cpp create mode 100644 test/template/template.cpp create mode 100755 test/test.sh create mode 100644 test/util.h delete mode 100644 tests/gcc5bug.cpp delete mode 100644 tests/precision.cpp delete mode 100644 tests/test.tex delete mode 100644 tests/whitespace.cpp diff --git a/.github/workflows/list_missing.yml b/.github/workflows/list_missing.yml new file mode 100644 index 0000000..48cbe03 --- /dev/null +++ b/.github/workflows/list_missing.yml @@ -0,0 +1,9 @@ +on: [push, pull_request] + +jobs: + pdf: + name: List missing + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh --missing diff --git a/.github/workflows/test_all.yml b/.github/workflows/test_all.yml new file mode 100644 index 0000000..eddc002 --- /dev/null +++ 
b/.github/workflows/test_all.yml @@ -0,0 +1,14 @@ +on: + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test all (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh diff --git a/.github/workflows/test_datastructures.yml b/.github/workflows/test_datastructures.yml new file mode 100644 index 0000000..9e58389 --- /dev/null +++ b/.github/workflows/test_datastructures.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/datastructures/**' + - 'test/datastructures/**' + pull_request: + paths: + - 'content/datastructures/**' + - 'test/datastructures/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test datastructures (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh datastructures diff --git a/.github/workflows/test_geometry.yml b/.github/workflows/test_geometry.yml new file mode 100644 index 0000000..c1cc95d --- /dev/null +++ b/.github/workflows/test_geometry.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/geometry/**' + - 'test/geometry/**' + pull_request: + paths: + - 'content/geometry/**' + - 'test/geometry/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test geometry (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh geometry diff --git a/.github/workflows/test_graph.yml b/.github/workflows/test_graph.yml new file mode 100644 index 0000000..b402d21 --- /dev/null +++ b/.github/workflows/test_graph.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/graph/**' + - 'test/graph/**' + pull_request: + paths: + - 'content/graph/**' + - 'test/graph/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test graph (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh graph diff --git a/.github/workflows/test_math.yml b/.github/workflows/test_math.yml new file mode 100644 index 0000000..6df75db --- /dev/null +++ b/.github/workflows/test_math.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/math/**' + - 'test/math/**' + pull_request: + paths: + - 'content/math/**' + - 'test/math/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test math (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh math diff --git a/.github/workflows/test_other.yml b/.github/workflows/test_other.yml new file mode 100644 index 0000000..07592d5 --- /dev/null +++ b/.github/workflows/test_other.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/other/**' + - 'test/other/**' + pull_request: + paths: + - 'content/other/**' + - 'test/other/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test other (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh other diff --git a/.github/workflows/test_pdf.yml b/.github/workflows/test_pdf.yml new file mode 100644 index 0000000..e1d660b --- /dev/null +++ b/.github/workflows/test_pdf.yml @@ -0,0 +1,25 @@ +on: + push: + paths: + - 'content/**' + - 'Makefile' + 
pull_request: + paths: + - 'content/**' + - 'Makefile' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test pdf (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 5 + steps: + - uses: actions/checkout@v4 + - run: | + sudo apt-get update + sudo apt-get install latexmk texlive-latex-base texlive-latex-recommended texlive-latex-extra texlive-lang-german texlive-fonts-extra + - run: make diff --git a/.github/workflows/test_string.yml b/.github/workflows/test_string.yml new file mode 100644 index 0000000..8ca5e1b --- /dev/null +++ b/.github/workflows/test_string.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/string/**' + - 'test/string/**' + pull_request: + paths: + - 'content/string/**' + - 'test/string/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test string (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh string diff --git a/.github/workflows/test_template.yml b/.github/workflows/test_template.yml new file mode 100644 index 0000000..827b9ac --- /dev/null +++ b/.github/workflows/test_template.yml @@ -0,0 +1,22 @@ +on: + push: + paths: + - 'content/template/**' + - 'test/template/**' + pull_request: + paths: + - 'content/template/**' + - 'test/template/**' + workflow_dispatch: + +jobs: + pdf: + strategy: + matrix: + os: [ubuntu-latest, ubuntu-22.04] + name: Test template (${{ matrix.os }}) + runs-on: ${{ matrix.os }} + timeout-minutes: 10 + steps: + - uses: actions/checkout@v4 + - run: ./test/test.sh template diff --git a/.gitignore b/.gitignore index 21eab22..632f1b8 100644 --- a/.gitignore +++ b/.gitignore @@ -220,3 +220,8 @@ TSWLatexianTemp* *-tags.tex *~ + +# ignore build dir +build/* +# dont ignore build tcr +!tcr.pdf diff --git a/Makefile b/Makefile index 0338a34..b3538cf 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,4 @@ all: - latexmk -pdf tcr + cd content; latexmk -pdf tcr -output-directory=.. 
-aux-directory=../build/ -usepretex="\newcommand{\gitorigin}{https://github.com/mzuenni/ContestReference/tree/$(shell git branch --show-current)/content/}" clean: - latexmk -c tcr - rm -f *.thm + rm -r build/* diff --git a/content/datastructures/LCT.cpp b/content/datastructures/LCT.cpp new file mode 100644 index 0000000..c1dd278 --- /dev/null +++ b/content/datastructures/LCT.cpp @@ -0,0 +1,178 @@ +constexpr ll queryDefault = 0; +constexpr ll updateDefault = 0; + +ll _modify(ll x, ll y) { + return x + y; +} + +ll _query(ll x, ll y) { + return x + y; +} + +ll _update(ll delta, int length) { + if (delta == updateDefault) return updateDefault; + //ll result = delta + //for (int i=1; ileft != this && + parent->right != this); + } + + void push() { + if (revert) { + revert = false; + swap(left, right); + if (left) left->revert ^= 1; + if (right) right->revert ^= 1; + } + nodeValue = joinValueDelta(nodeValue, delta); + subTreeValue = joinValueDelta(subTreeValue, + _update(delta, size)); + if (left) left->delta = joinDeltas(left->delta, delta); + if (right) right->delta = joinDeltas(right->delta, delta); + delta = updateDefault; + } + + ll getSubtreeValue() { + return joinValueDelta(subTreeValue, _update(delta, size)); + } + + void update() { + subTreeValue = joinValueDelta(nodeValue, delta); + size = 1; + if (left) { + subTreeValue = _query(subTreeValue, + left->getSubtreeValue()); + size += left->size; + } + if (right) { + subTreeValue = _query(subTreeValue, + right->getSubtreeValue()); + size += right->size; + }} + }; + + vector nodes; + + LCT(int n) : nodes(n) { + for (int i = 0; i < n; i++) nodes[i].id = i; + } + + void connect(Node* ch, Node* p, int isLeftChild) { + if (ch) ch->parent = p; + if (isLeftChild >= 0) { + if (isLeftChild) p->left = ch; + else p->right = ch; + }} + + void rotate(Node* x) { + Node* p = x->parent; + Node* g = p->parent; + bool isRootP = p->isRoot(); + bool leftChildX = (x == p->left); + + connect(leftChildX ? x->right : x->left, p, leftChildX); + connect(p, x, !leftChildX); + connect(x, g, isRootP ? -1 : p == g->left); + p->update(); + } + + void splay(Node* x) { + while (!x->isRoot()) { + Node* p = x->parent; + Node* g = p->parent; + if (!p->isRoot()) g->push(); + p->push(); + x->push(); + if (!p->isRoot()) rotate((x == p->left) == + (p == g->left) ? p : x); + rotate(x); + } + x->push(); + x->update(); + } + + Node* expose(Node* x) { + Node* last = nullptr; + for (Node* y = x; y; y = y->parent) { + splay(y); + y->left = last; + last = y; + } + splay(x); + return last; + } + + void makeRoot(Node* x) { + expose(x); + x->revert ^= 1; + } + + bool connected(Node* x, Node* y) { + if (x == y) return true; + expose(x); + expose(y); + return x->parent; + } + + void link(Node* x, Node* y) { + assert(!connected(x, y)); // not yet connected! + makeRoot(x); + x->parent = y; + } + + void cut(Node* x, Node* y) { + makeRoot(x); + expose(y); + //must be a tree edge! 
+ assert(!(y->right != x || x->left != nullptr)); + y->right->parent = nullptr; + y->right = nullptr; + } + + Node* lca(Node* x, Node* y) { + assert(connected(x, y)); + expose(x); + return expose(y); + } + + ll query(Node* from, Node* to) { + makeRoot(from); + expose(to); + if (to) return to->getSubtreeValue(); + return queryDefault; + } + + void modify(Node* from, Node* to, ll delta) { + makeRoot(from); + expose(to); + to->delta = joinDeltas(to->delta, delta); + } +}; diff --git a/content/datastructures/bitset.cpp b/content/datastructures/bitset.cpp new file mode 100644 index 0000000..d19abb0 --- /dev/null +++ b/content/datastructures/bitset.cpp @@ -0,0 +1,7 @@ +bitset<10> bits(0b000010100); +bits._Find_first(); //2 +bits._Find_next(2); //4 +bits._Find_next(4); //10 bzw. N +bits[x] = 1; //not bits.set(x) or bits.reset(x)! +bits[x].flip(); //not bits.flip(x)! +bits.count(); //number of set bits diff --git a/content/datastructures/datastructures.tex b/content/datastructures/datastructures.tex new file mode 100644 index 0000000..40132a9 --- /dev/null +++ b/content/datastructures/datastructures.tex @@ -0,0 +1,121 @@ +\section{Datenstrukturen} + +\begin{algorithm}{Segmentbaum} + \begin{methods} + \method{SegTree}{baut den Baum auf}{n} + \method{query}{findet Summe über $[l, r)$}{\log(n)} + \method{update}{ändert einen Wert}{\log(n)} + \end{methods} + \sourcecode{datastructures/segmentTree.cpp} + + \subsubsection{Lazy Propagation} + Assignment modifications, sum queries \\ + \method{lower\_bound}{erster Index in $[l, r)$ $\geq$ x (erfordert max-combine)}{\log(n)} + \sourcecode{datastructures/lazyPropagation.cpp} +\end{algorithm} + +\begin{algorithm}{Wavelet Tree} + \begin{methods} + \method{WaveletTree}{baut den Baum auf}{n\*\log(\Sigma)} + \method{kth}{sort $[l, r)[k]$}{\log(\Sigma)} + \method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(\Sigma)} + \end{methods} + \sourcecode{datastructures/waveletTree.cpp} +\end{algorithm} +\columnbreak + +\begin{algorithm}{Fenwick Tree} + \begin{methods} + \method{init}{baut den Baum auf}{n\*\log(n)} + \method{prefix\_sum}{summe von $[0, i]$}{\log(n)} + \method{update}{addiert ein Delta zu einem Element}{\log(n)} + \end{methods} + \sourcecode{datastructures/fenwickTree.cpp} + + \begin{methods} + \method{init}{baut den Baum auf}{n\*\log(n)} + \method{prefix\_sum}{summe von [$0, i]$}{\log(n)} + \method{update}{addiert ein Delta zu allen Elementen $[l, r)$. $l\leq r$!}{\log(n)} + \end{methods} + \sourcecode{datastructures/fenwickTree2.cpp} +\end{algorithm} + +\begin{algorithm}{STL-Rope (Implicit Cartesian Tree)} + \sourcecode{datastructures/stlRope.cpp} +\end{algorithm} +\columnbreak + +\begin{algorithm}{(Implicit) Treap (Cartesian Tree)} + \begin{methods} + \method{insert}{fügt wert $\mathit{val}$ an stelle $i$ ein (verschiebt alle Positionen $\geq i$)}{\log(n)} + \method{remove}{löscht werte $[i,i+\mathit{count})$}{\log(n)} + \end{methods} + \sourcecode{datastructures/treap2.cpp} +\end{algorithm} + +\begin{algorithm}{Range Minimum Query} + \begin{methods} + \method{init}{baut Struktur auf}{n\*\log(n)} + \method{queryIdempotent}{Index des Minimums in $[l, r)$. $l> { + // (for doubles, use inf = 1/.0, div(a,b) = a/b) + ll div(ll a, ll b) {return a / b - ((a ^ b) < 0 && a % b);} + + bool isect(iterator x, iterator y) { + if (y == end()) {x->p = INF; return false;} + if (x->m == y->m) x->p = x->b > y->b ? 
INF : -INF; + else x->p = div(y->b - x->b, x->m - y->m); + return x->p >= y->p; + } + + void add(ll m, ll b) { + auto x = insert({m, b, 0}); + while (isect(x, next(x))) erase(next(x)); + if (x != begin()) { + x--; + if (isect(x, next(x))) { + erase(next(x)); + isect(x, next(x)); + }} + while (x != begin() && prev(x)->p >= x->p) { + x--; + isect(x, erase(next(x))); + }} + + ll query(ll x) { + auto l = *lower_bound(x); + return l.m * x + l.b; + } +}; diff --git a/content/datastructures/fenwickTree.cpp b/content/datastructures/fenwickTree.cpp new file mode 100644 index 0000000..eb5cd73 --- /dev/null +++ b/content/datastructures/fenwickTree.cpp @@ -0,0 +1,15 @@ +vector<ll> tree; + +void update(int i, ll val) { + for (i++; i < sz(tree); i += i & -i) tree[i] += val; +} + +void init(int n) { + tree.assign(n + 1, 0); +} + +ll prefix_sum(int i) { + ll sum = 0; + for (i++; i > 0; i -= i & -i) sum += tree[i]; + return sum; +} diff --git a/content/datastructures/fenwickTree2.cpp b/content/datastructures/fenwickTree2.cpp new file mode 100644 index 0000000..9384e3c --- /dev/null +++ b/content/datastructures/fenwickTree2.cpp @@ -0,0 +1,21 @@ +vector<ll> add, mul; + +void update(int l, int r, ll val) { + for (int tl = l + 1; tl < sz(add); tl += tl & -tl) + add[tl] += val, mul[tl] -= val * l; + for (int tr = r + 1; tr < sz(add); tr += tr & -tr) + add[tr] -= val, mul[tr] += val * r; +} + +void init(vector<ll>& v) { + mul.assign(sz(v) + 1, 0); + add.assign(sz(v) + 1, 0); + for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]); +} + +ll prefix_sum(int i) { + ll res = 0; i++; + for (int ti = i; ti > 0; ti -= ti & -ti) + res += add[ti] * i + mul[ti]; + return res; +} diff --git a/content/datastructures/lazyPropagation.cpp b/content/datastructures/lazyPropagation.cpp new file mode 100644 index 0000000..441590e --- /dev/null +++ b/content/datastructures/lazyPropagation.cpp @@ -0,0 +1,85 @@ +struct SegTree { + using T = ll; using U = ll; + int n; + static constexpr T E = 0; // Neutral element for combine + static constexpr U UF = inf; // Unused value by updates + vector<T> tree; + int h; + vector<U> lazy; + vector<int> k; // size of segments (optional) + + SegTree(const vector<T>& a) : n(sz(a) + 1), tree(2 * n, E), + //SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def), + h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) { + copy(all(a), tree.begin() + n); + for (int i = n - 1; i > 0; i--) { + k[i] = 2 * k[2 * i]; + tree[i] = comb(tree[2 * i], tree[2 * i + 1]); + }} + + T comb(T a, T b) {return a + b;} // Modify this + E + + void apply(int i, U val) { // And this + UF + tree[i] = val * k[i]; + if (i < n) lazy[i] = val; // Don't forget this + } + + void push_down(int i) { + if (lazy[i] != UF) { + apply(2 * i, lazy[i]); + apply(2 * i + 1, lazy[i]); + lazy[i] = UF; + }} + + void push(int i) { + for (int s = h; s > 0; s--) push_down(i >> s); + } + + void build(int i) { + while (i /= 2) { + push_down(i); + tree[i] = comb(tree[2 * i], tree[2 * i + 1]); + }} + + void update(int l, int r, U val) { + l += n, r += n; + int l0 = l, r0 = r; + push(l0), push(r0 - 1); + for (; l < r; l /= 2, r /= 2) { + if (l&1) apply(l++, val); + if (r&1) apply(--r, val); + } + build(l0), build(r0 - 1); + } + + T query(int l, int r) { + l += n, r += n; + push(l), push(r - 1); + T resL = E, resR = E; + for (; l < r; l /= 2, r /= 2) { + if (l&1) resL = comb(resL, tree[l++]); + if (r&1) resR = comb(tree[--r], resR); + } + return comb(resL, resR); + } + + // Optional: + int lower_bound(int l, int r, T x) { + l += n, r += n; + push(l), push(r - 1); + int a[64] = {}, lp =
0, rp = 64; + for (; l < r; l /= 2, r /= 2) { + if (l&1) a[lp++] = l++; + if (r&1) a[--rp] = --r; + } + for (int i : a) if (i != 0 && tree[i] >= x) { // Modify this + while (i < n) { + push_down(i); + if (tree[2 * i] >= x) i = 2 * i; // And this + else i = 2 * i + 1; + } + return i - n; + } + return -1; + } +}; diff --git a/content/datastructures/lichao.cpp b/content/datastructures/lichao.cpp new file mode 100644 index 0000000..f66778e --- /dev/null +++ b/content/datastructures/lichao.cpp @@ -0,0 +1,46 @@ +vector xs; // IMPORTANT: Initialize before constructing! +int findX(int i) {return lower_bound(all(xs), i) - begin(xs);} + +struct Fun { // Default: Linear function. Change as needed. + ll m, c; + ll operator()(int x) {return m*xs[x] + c;} +}; + +// Default: Computes min. Change lines with comment for max. +struct Lichao { + static constexpr Fun id = {0, inf}; // {0, -inf} + int n, cap; + vector seg; + Lichao() : n(sz(xs)), cap(2<<__lg(n)), seg(2*cap, id) {} + + void _insert(Fun f, int l, int r, int i) { + while (i < 2*cap){ + int m = (l+r)/2; + if (m >= n) {r = m; i = 2*i; continue;} + Fun &g = seg[i]; + if (f(m) < g(m)) swap(f, g); // > + if (f(l) < g(l)) r = m, i = 2*i; // > + else l = m, i = 2*i+1; + }} + void insert(Fun f) {_insert(f, 0, cap, 1);} + + void _segmentInsert(Fun f, int l, int r, int a, int b, int i) { + if (l <= a && b <= r) _insert(f, a, b, i); + else if (a < r && l < b){ + int m = (a+b)/2; + _segmentInsert(f, l, r, a, m, 2*i); + _segmentInsert(f, l, r, m, b, 2*i+1); + }} + void segmentInsert(Fun f, ll l, ll r) { + _segmentInsert(f, findX(l), findX(r), 0, cap, 1); + } + + ll _query(int x) { + ll ans = inf; // -inf + for (int i = x + cap; i > 0; i /= 2) { + ans = min(ans, seg[i](x)); // max + } + return ans; + } + ll query(ll x) {return _query(findX(x));} +}; diff --git a/content/datastructures/monotonicConvexHull.cpp b/content/datastructures/monotonicConvexHull.cpp new file mode 100644 index 0000000..44bff83 --- /dev/null +++ b/content/datastructures/monotonicConvexHull.cpp @@ -0,0 +1,27 @@ +// Lower Envelope mit MONOTONEN Inserts und Queries. Jede neue +// Gerade hat kleinere Steigung als alle vorherigen. 
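+// Usage sketch (illustrative; uses the global ls/ptr below): lines are
+// added with strictly decreasing slope and queried with non-decreasing x;
+// query returns the minimum over all inserted lines at x.
+//   add(5, 0);    add(2, 7);    add(-1, 20);
+//   query(1);  // -> 5   (5*1 is smallest)
+//   query(4);  // -> 15  (2*4+7 wins)
+//   query(10); // -> 10  (-10+20 wins)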
+struct Line { + ll m, b; + ll operator()(ll x) {return m*x+b;} +}; + +vector ls; +int ptr = 0; + +bool bad(Line l1, Line l2, Line l3) { + return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m); +} + +void add(ll m, ll b) { // Laufzeit O(1) amortisiert + while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, b})) { + ls.pop_back(); + } + ls.push_back({m, b}); + ptr = min(ptr, sz(ls) - 1); +} + +ll query(ll x) { // Laufzeit: O(1) amortisiert + ptr = min(ptr, sz(ls) - 1); + while (ptr < sz(ls)-1 && ls[ptr + 1](x) < ls[ptr](x)) ptr++; + return ls[ptr](x); +} \ No newline at end of file diff --git a/content/datastructures/pbds.cpp b/content/datastructures/pbds.cpp new file mode 100644 index 0000000..f0889a2 --- /dev/null +++ b/content/datastructures/pbds.cpp @@ -0,0 +1,18 @@ +#include +using namespace __gnu_pbds; +template +using Tree = tree, rb_tree_tag, + tree_order_statistics_node_update>; +// T.order_of_key(x): number of elements strictly less than x +// *T.find_by_order(k): k-th element + +template +struct chash { + static const uint64_t C = ll(2e18 * acos(-1)) | 199; // random odd + size_t operator()(T o) const { + return __builtin_bswap64(hash()(o) * C); +}}; +template +using hashMap = gp_hash_table>; +template +using hashSet = gp_hash_table>; diff --git a/content/datastructures/persistent.cpp b/content/datastructures/persistent.cpp new file mode 100644 index 0000000..4093cdc --- /dev/null +++ b/content/datastructures/persistent.cpp @@ -0,0 +1,18 @@ +template +struct persistent { + int& time; + vector> data; + + persistent(int& time, T value = {}) + : time(time), data(1, {time, value}) {} + + T get(int t) { + return prev(upper_bound(all(data), pair{t+1, T{}}))->second; + } + + int set(T value) { + time += 2; + data.push_back({time, value}); + return time; + } +}; diff --git a/content/datastructures/persistentArray.cpp b/content/datastructures/persistentArray.cpp new file mode 100644 index 0000000..60d8b17 --- /dev/null +++ b/content/datastructures/persistentArray.cpp @@ -0,0 +1,24 @@ +template +struct persistentArray { + int time; + vector> data; + vector> mods; + + persistentArray(int n, T value = {}) + : time(0), data(n, {time, value}) {} + + T get(int p, int t) {return data[p].get(t);} + + int set(int p, T value) { + mods.push_back({p, time}); + return data[p].set(value); + } + + void reset(int t) { + while (!mods.empty() && mods.back().second > t) { + data[mods.back().first].data.pop_back(); + mods.pop_back(); + } + time = t; + } +}; diff --git a/content/datastructures/segmentTree.cpp b/content/datastructures/segmentTree.cpp new file mode 100644 index 0000000..6b69d0b --- /dev/null +++ b/content/datastructures/segmentTree.cpp @@ -0,0 +1,42 @@ +struct SegTree { + using T = ll; + int n; + vector tree; + static constexpr T E = 0; // Neutral element for combine + + SegTree(vector& a) : n(sz(a)), tree(2 * n) { + //SegTree(int size, T val = E) : n(size), tree(2 * n, val) { + copy(all(a), tree.begin() + n); + for (int i = n - 1; i > 0; i--) { // remove for range update + tree[i] = comb(tree[2 * i], tree[2 * i + 1]); + }} + + T comb(T a, T b) {return a + b;} // modify this + neutral + + void update(int i, T val) { + tree[i += n] = val; // apply update code + while (i /= 2) tree[i] = comb(tree[2 * i], tree[2 * i + 1]); + } + + T query(int l, int r) { + T resL = E, resR = E; + for (l += n, r += n; l < r; l /= 2, r /= 2) { + if (l&1) resL = comb(resL, tree[l++]); + if (r&1) resR = comb(tree[--r], resR); + } + return comb(resL, resR); + } + + // OR: range update + point query, needs 
commutative comb + void modify(int l, int r, T val) { + for (l += n, r += n; l < r; l /= 2, r /= 2) { + if (l&1) tree[l] = comb(tree[l], val), l++; + if (r&1) --r, tree[r] = comb(tree[r], val); + }} + + T query(int i) { + T res = E; + for (i += n; i > 0; i /= 2) res = comb(res, tree[i]); + return res; + } +}; diff --git a/content/datastructures/sparseTable.cpp b/content/datastructures/sparseTable.cpp new file mode 100644 index 0000000..b3f946e --- /dev/null +++ b/content/datastructures/sparseTable.cpp @@ -0,0 +1,24 @@ +struct SparseTable { + vector> st; + ll *a; + + int better(int lidx, int ridx) { + return a[lidx] <= a[ridx] ? lidx : ridx; + } + + void init(vector* vec) { + int n = sz(*vec); + a = vec->data(); + st.assign(__lg(n) + 1, vector(n)); + iota(all(st[0]), 0); + for (int j = 0; (2 << j) <= n; j++) { + for (int i = 0; i + (2 << j) <= n; i++) { + st[j + 1][i] = better(st[j][i] , st[j][i + (1 << j)]); + }}} + + int queryIdempotent(int l, int r) { + if (r <= l) return -1; + int j = __lg(r - l); //31 - builtin_clz(r - l); + return better(st[j][l] , st[j][r - (1 << j)]); + } +}; diff --git a/content/datastructures/sparseTableDisjoint.cpp b/content/datastructures/sparseTableDisjoint.cpp new file mode 100644 index 0000000..55165d4 --- /dev/null +++ b/content/datastructures/sparseTableDisjoint.cpp @@ -0,0 +1,27 @@ +struct DisjointST { + static constexpr ll neutral = 0; + vector> dst; + ll* a; + + ll combine(const ll& x, const ll& y) { + return x + y; + } + + void init(vector* vec) { + int n = sz(*vec); + a = vec->data(); + dst.assign(__lg(n) + 1, vector(n + 1, neutral)); + for (int h = 0, l = 1; l <= n; h++, l *= 2) { + for (int c = l; c < n + l; c += 2 * l) { + for (int i = c; i < min(n, c + l); i++) + dst[h][i + 1] = combine(dst[h][i], vec->at(i)); + for (int i = min(n, c); i > c - l; i--) + dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]); + }}} + + ll query(int l, int r) { + if (r <= l) return neutral; + int h = __lg(l ^ r); + return combine(dst[h][l], dst[h][r]); + } +}; diff --git a/content/datastructures/stlHashMap.cpp b/content/datastructures/stlHashMap.cpp new file mode 100644 index 0000000..b107dde --- /dev/null +++ b/content/datastructures/stlHashMap.cpp @@ -0,0 +1,17 @@ +#include +using namespace __gnu_pbds; + +template +struct betterHash { + size_t operator()(T o) const { + size_t h = hash()(o) ^ 42394245; //random value + h = ((h >> 16) ^ h) * 0x45d9f3b; + h = ((h >> 16) ^ h) * 0x45d9f3b; + h = ((h >> 16) ^ h); + return h; +}}; + +template> +using hashMap = gp_hash_table; +template> +using hashSet = gp_hash_table; diff --git a/content/datastructures/stlPriorityQueue.cpp b/content/datastructures/stlPriorityQueue.cpp new file mode 100644 index 0000000..32b2455 --- /dev/null +++ b/content/datastructures/stlPriorityQueue.cpp @@ -0,0 +1,8 @@ +#include +template +using pQueue = __gnu_pbds::priority_queue; //> + +auto it = pq.push(5); +pq.modify(it, 6); +pq.join(pq2); +// push, join are O(1), pop, modify, erase O(log n) amortized diff --git a/content/datastructures/stlRope.cpp b/content/datastructures/stlRope.cpp new file mode 100644 index 0000000..804cd67 --- /dev/null +++ b/content/datastructures/stlRope.cpp @@ -0,0 +1,8 @@ +#include +using namespace __gnu_cxx; +rope v; // Wie normaler Container. 
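+// Sketch (assumes a rope<char> v and 0-indexed positions): the classic
+// "cut and paste" operation, moving the block [l, l+len) to the front,
+// built only from the calls listed below:
+//   auto cut = v.substr(l, len);
+//   v.erase(l, len);
+//   v.insert(v.mutable_begin(), cut);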
+v.push_back(num); // O(log(n)) +rope sub = v.substr(start, length); // O(log(n)) +v.erase(start, length); // O(log(n)) +v.insert(v.mutable_begin() + offset, sub); // O(log(n)) +for(auto it = v.mutable_begin(); it != v.mutable_end(); it++) diff --git a/content/datastructures/stlTree.cpp b/content/datastructures/stlTree.cpp new file mode 100644 index 0000000..fbb68b9 --- /dev/null +++ b/content/datastructures/stlTree.cpp @@ -0,0 +1,13 @@ +#include +#include +using namespace std; using namespace __gnu_pbds; +template +using Tree = tree, rb_tree_tag, + tree_order_statistics_node_update>; + +int main() { + Tree X; + for (int i : {1, 2, 4, 8, 16}) X.insert(i); + *X.find_by_order(3); // => 8 + X.order_of_key(10); // => 4 = min i, mit X[i] >= 10 +} diff --git a/content/datastructures/treap.cpp b/content/datastructures/treap.cpp new file mode 100644 index 0000000..c96e36a --- /dev/null +++ b/content/datastructures/treap.cpp @@ -0,0 +1,79 @@ +struct node { + int key, prio, left, right, size; + node(int key, int prio) : key(key), prio(prio), left(-1), + right(-1), size(1) {}; +}; + +vector treap; + +int getSize(int root) { + return root < 0 ? 0 : treap[root].size; +} + +void update(int root) { + if (root < 0) return; + treap[root].size = 1 + getSize(treap[root].left) + + getSize(treap[root].right); +} + +pair split(int root, int minKeyRight) { + if (root < 0) return {-1, -1}; + if (treap[root].key >= minKeyRight) { + auto leftSplit = split(treap[root].left, minKeyRight); + treap[root].left = leftSplit.second; + update(root); + leftSplit.second = root; + return leftSplit; + } else { + auto rightSplit = split(treap[root].right, minKeyRight); + treap[root].right = rightSplit.first; + update(root); + rightSplit.first = root; + return rightSplit; +}} + +int merge (int left, int right) { + if (left < 0) return right; + if (right < 0) return left; + if (treap[left].prio < treap[right].prio) { //min priority heap + treap[left].right = merge(treap[left].right, right); + update(left); + return left; + } else { + treap[right].left = merge(left, treap[right].left); + update(right); + return right; +}} + +//insert values with high priority first +int insert(int root, int key, int prio) { + int next = sz(treap); + treap.emplace_back(key, prio); + auto t = split(root, key); + //returns new root + return merge(merge(t.first, next), t.second); +} + +int remove(int root, int key) { + if (root < 0) return -1; + if (key < treap[root].key) { + treap[root].left = remove(treap[root].left, key); + update(root); + return root; + } else if (key > treap[root].key) { + treap[root].right = remove(treap[root].right, key); + update(root); + return root; + } else { //check prio? + return merge(treap[root].left, treap[root].right); +}} + +int kth(int root, int k) { + if (root < 0) return -1; + int leftSize = getSize(treap[root].left); + if (k < leftSize) return kth(treap[root].left, k); + else if (k > leftSize) { + return kth(treap[root].right, k - 1 - leftSize); + } + return root; +} diff --git a/content/datastructures/treap2.cpp b/content/datastructures/treap2.cpp new file mode 100644 index 0000000..c5a60e9 --- /dev/null +++ b/content/datastructures/treap2.cpp @@ -0,0 +1,79 @@ +mt19937 rng(0xc4bd5dad); +struct Treap { + struct Node { + ll val; + int prio, size = 1, l = -1, r = -1; + Node(ll x) : val(x), prio(rng()) {} + }; + + vector treap; + int root = -1; + + int getSize(int v) { + return v < 0 ? 
0 : treap[v].size; + } + + void upd(int v) { + if (v < 0) return; + auto& V = treap[v]; + V.size = 1 + getSize(V.l) + getSize(V.r); + // Update Node Code + } + + void push(int v) { + if (v < 0) return; + //auto& V = treap[v]; + //if (V.lazy) { + // Lazy Propagation Code + // if (V.l >= 0) treap[V.l].lazy = true; + // if (V.r >= 0) treap[V.r].lazy = true; + // V.lazy = false; + //} + } + + pair split(int v, int k) { + if (v < 0) return {-1, -1}; + auto& V = treap[v]; + push(v); + if (getSize(V.l) >= k) { // "V.val >= k" for lower_bound(k) + auto [left, right] = split(V.l, k); + V.l = right; + upd(v); + return {left, v}; + } else { + // and only "k" + auto [left, right] = split(V.r, k - getSize(V.l) - 1); + V.r = left; + upd(v); + return {v, right}; + }} + + int merge(int left, int right) { + if (left < 0) return right; + if (right < 0) return left; + if (treap[left].prio < treap[right].prio) { + push(left); + treap[left].r = merge(treap[left].r, right); + upd(left); + return left; + } else { + push(right); + treap[right].l = merge(left, treap[right].l); + upd(right); + return right; + }} + + void insert(int i, ll val) { // and i = val + auto [left, right] = split(root, i); + treap.emplace_back(val); + left = merge(left, sz(treap) - 1); + root = merge(left, right); + } + + void remove(int i, int count = 1) { + auto [left, t_right] = split(root, i); + auto [middle, right] = split(t_right, count); + root = merge(left, right); + } + // for query use remove and read middle BEFORE remerging +}; diff --git a/content/datastructures/unionFind.cpp b/content/datastructures/unionFind.cpp new file mode 100644 index 0000000..dd5a569 --- /dev/null +++ b/content/datastructures/unionFind.cpp @@ -0,0 +1,26 @@ +// unions[i] >= 0 => unions[i] = parent +// unions[i] < 0 => unions[i] = -size +vector unions; + +void init(int n) { //Initialisieren + unions.assign(n, -1); +} + +int findSet(int a) { // Pfadkompression + if (unions[a] < 0) return a; + return unions[a] = findSet(unions[a]); +} + +void linkSets(int a, int b) { // Union by size. + if (unions[b] > unions[a]) swap(a, b); + unions[b] += unions[a]; + unions[a] = b; +} + +void unionSets(int a, int b) { // Diese Funktion aufrufen. 
+ if (findSet(a) != findSet(b)) linkSets(findSet(a), findSet(b)); +} + +int size(int a) { + return -unions[findSet(a)]; +} diff --git a/content/datastructures/waveletTree.cpp b/content/datastructures/waveletTree.cpp new file mode 100644 index 0000000..090cdb2 --- /dev/null +++ b/content/datastructures/waveletTree.cpp @@ -0,0 +1,40 @@ +struct WaveletTree { + using it = vector::iterator; + WaveletTree *ln = nullptr, *rn = nullptr; + vector b = {0}; + ll lo, hi; + + WaveletTree(vector in) : WaveletTree(all(in)) {} + + WaveletTree(it from, it to) : // call above one + lo(*min_element(from, to)), hi(*max_element(from, to) + 1) { + ll mid = (lo + hi) / 2; + auto f = [&](ll x) {return x < mid;}; + for (it c = from; c != to; c++) { + b.push_back(b.back() + f(*c)); + } + if (lo + 1 >= hi) return; + it pivot = stable_partition(from, to, f); + ln = new WaveletTree(from, pivot); + rn = new WaveletTree(pivot, to); + } + + // kth element in sort[l, r) all 0-indexed + ll kth(int l, int r, int k) { + if (k < 0 || l + k >= r) return -1; + if (lo + 1 >= hi) return lo; + int inLeft = b[r] - b[l]; + if (k < inLeft) return ln->kth(b[l], b[r], k); + else return rn->kth(l-b[l], r-b[r], k-inLeft); + } + + // count elements in[l, r) smaller than k + int countSmaller(int l, int r, ll k) { + if (l >= r || k <= lo) return 0; + if (hi <= k) return r - l; + return ln->countSmaller(b[l], b[r], k) + + rn->countSmaller(l-b[l], r-b[r], k); + } + + ~WaveletTree() {delete ln; delete rn;} +}; diff --git a/content/geometry/antipodalPoints.cpp b/content/geometry/antipodalPoints.cpp new file mode 100644 index 0000000..110cc74 --- /dev/null +++ b/content/geometry/antipodalPoints.cpp @@ -0,0 +1,12 @@ +vector> antipodalPoints(vector& h) { + if (sz(h) < 2) return {}; + vector> result; + for (int i = 0, j = 1; i < j; i++) { + while (true) { + result.push_back({i, j}); + if (cross(h[(i + 1) % sz(h)] - h[i], + h[(j + 1) % sz(h)] - h[j]) <= 0) break; + j = (j + 1) % sz(h); + }} + return result; +} diff --git a/content/geometry/circle.cpp b/content/geometry/circle.cpp new file mode 100644 index 0000000..6789c52 --- /dev/null +++ b/content/geometry/circle.cpp @@ -0,0 +1,33 @@ +// berechnet die Schnittpunkte von zwei Kreisen +// (Kreise dürfen nicht gleich sein!) 
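+// Worked example (sketch; assumes pt = complex<double>): two unit circles
+// with centers (0,0) and (1,0) intersect at x = 0.5, y = +-sqrt(3)/2:
+//   auto ps = circleIntersection({0, 0}, 1.0, {1, 0}, 1.0);
+//   // ps[0] ~ (0.5, 0.866), ps[1] ~ (0.5, -0.866)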
+vector circleIntersection(pt c1, double r1, + pt c2, double r2) { + double d = abs(c1 - c2); + if (d < abs(r1 - r2) || d > abs(r1 + r2)) return {}; + double a = (r1 * r1 - r2 * r2 + d * d) / (2 * d); + pt p = (c2 - c1) * a / d + c1; + if (d == abs(r1 - r2) || d == abs(r1 + r2)) return {p}; + double h = sqrt(r1 * r1 - a * a); + return {p + pt{0, 1} * (c2 - c1) * h / d, + p - pt{0, 1} * (c2 - c1) * h / d}; +} + +// berechnet die Schnittpunkte zwischen +// einem Kreis(Kugel) und einem Strahl (2D und 3D) +vector circleRayIntersection(pt center, double r, + pt orig, pt dir) { + vector result; + double a = norm(dir); + double b = 2 * dot(dir, orig - center); + double c = norm(orig - center) - r * r; + double discr = b * b - 4 * a * c; + if (discr >= 0) { + //t in [0, 1] => schnitt mit Segment [orig, orig + dir] + double t1 = -(b + sqrt(discr)) / (2 * a); + double t2 = -(b - sqrt(discr)) / (2 * a); + if (t1 >= 0) result.push_back(t1 * dir + orig); + if (t2 >= 0 && abs(t1 - t2) > EPS) { + result.push_back(t2 * dir + orig); + }} + return result; +} diff --git a/content/geometry/closestPair.cpp b/content/geometry/closestPair.cpp new file mode 100644 index 0000000..9b115f3 --- /dev/null +++ b/content/geometry/closestPair.cpp @@ -0,0 +1,27 @@ +ll rec(vector::iterator a, int l, int r) { + if (r - l < 2) return INF; + int m = (l + r) / 2; + ll midx = a[m].real(); + ll ans = min(rec(a, l, m), rec(a, m, r)); + + inplace_merge(a+l, a+m, a+r, [](const pt& x, const pt& y) { + return x.imag() < y.imag(); + }); + + pt tmp[8]; + fill(all(tmp), a[l]); + for (int i = l + 1, next = 0; i < r; i++) { + if (ll x = a[i].real() - midx; x * x < ans) { + for (pt& p : tmp) ans = min(ans, norm(p - a[i])); + tmp[next++ & 7] = a[i]; + } + } + return ans; +} + +ll shortestDist(vector a) { // sz(pts) > 1 + sort(all(a), [](const pt& x, const pt& y) { + return x.real() < y.real(); + }); + return rec(a.begin(), 0, sz(a)); +} diff --git a/content/geometry/convexHull.cpp b/content/geometry/convexHull.cpp new file mode 100644 index 0000000..6d89e05 --- /dev/null +++ b/content/geometry/convexHull.cpp @@ -0,0 +1,18 @@ +vector convexHull(vector pts){ + sort(all(pts), [](const pt& a, const pt& b){ + return real(a) == real(b) ? imag(a) < imag(b) + : real(a) < real(b); + }); + pts.erase(unique(all(pts)), pts.end()); + int k = 0; + vector h(2 * sz(pts)); + auto half = [&](auto begin, auto end, int t) { + for (auto it = begin; it != end; it++) { + while (k > t && cross(h[k-2], h[k-1], *it) <= 0) k--; + h[k++] = *it; + }}; + half(all(pts), 1);// Untere Hülle. + half(next(pts.rbegin()), pts.rend(), k);// Obere Hülle. 
+ h.resize(k); + return h; +} diff --git a/content/geometry/delaunay.cpp b/content/geometry/delaunay.cpp new file mode 100644 index 0000000..c813892 --- /dev/null +++ b/content/geometry/delaunay.cpp @@ -0,0 +1,124 @@ +using lll = __int128; +using pt = complex; + +constexpr pt INF_PT = pt(2e18, 2e18); + +bool circ(pt p, pt a, pt b, pt c) {// p in circle(A,B,C), ABC must be ccw + return imag((c-b)*conj(p-c)*(a-p)*conj(b-a)) < 0; +} + +struct QuadEdge { + QuadEdge* rot = nullptr; + QuadEdge* onext = nullptr; + pt orig = INF_PT; + bool used = false; + QuadEdge* rev() const {return rot->rot;} + QuadEdge* lnext() const {return rot->rev()->onext->rot;} + QuadEdge* oprev() const {return rot->onext->rot;} + pt dest() const {return rev()->orig;} +}; + +deque edgeData; + +QuadEdge* makeEdge(pt from, pt to) { + for (int _ : {0,1,2,3}) edgeData.push_back({}); + auto e = edgeData.end() - 4; + for (int j : {0,1,2,3}) e[j].onext = e[j^3].rot = &e[j^(j>>1)]; + e[0].orig = from; + e[1].orig = to; + return &e[0]; +} + +void splice(QuadEdge* a, QuadEdge* b) { + swap(a->onext->rot->onext, b->onext->rot->onext); + swap(a->onext, b->onext); +} + +QuadEdge* connect(QuadEdge* a, QuadEdge* b) { + QuadEdge* e = makeEdge(a->dest(), b->orig); + splice(e, a->lnext()); + splice(e->rev(), b); + return e; +} + +bool valid(QuadEdge* e, QuadEdge* base) { + return cross(e->dest(), base->orig, base->dest()) < 0; +} + +template +QuadEdge* deleteAll(QuadEdge* e, QuadEdge* base) { + if (valid(e, base)) { + while (circ(base->dest(), base->orig, e->dest(), (ccw ? e->onext : e->oprev())->dest())) { + QuadEdge* t = ccw ? e->onext : e->oprev(); + splice(e, e->oprev()); + splice(e->rev(), e->rev()->oprev()); + e = t; + }} + return e; +} + +template +pair rec(IT l, IT r) { + int n = distance(l, r); + if (n <= 3) { + QuadEdge* a = makeEdge(l[0], l[1]); + if (n == 2) return {a, a->rev()}; + QuadEdge* b = makeEdge(l[1], l[2]); + splice(a->rev(), b); + auto side = cross(l[0], l[1], l[2]); + QuadEdge* c = nullptr; + if (side != 0) c = connect(b, a); + if (side >= 0) return {a, b->rev()}; + else return {c->rev(), c}; + } + auto m = l + (n / 2); + auto [ldo, ldi] = rec(l, m); + auto [rdi, rdo] = rec(m, r); + while (true) { + if (cross(rdi->orig, ldi->orig, ldi->dest()) > 0) { + ldi = ldi->lnext(); + } else if (cross(ldi->orig, rdi->orig, rdi->dest()) < 0) { + rdi = rdi->rev()->onext; + } else break; + } + QuadEdge* base = connect(rdi->rev(), ldi); + if (ldi->orig == ldo->orig) ldo = base->rev(); + if (rdi->orig == rdo->orig) rdo = base; + while (true) { + QuadEdge* lcand = deleteAll(base->rev()->onext, base); + QuadEdge* rcand = deleteAll(base->oprev(), base); + if (!valid(lcand, base) && !valid(rcand, base)) break; + if (!valid(lcand, base) || (valid(rcand, base) && + circ(lcand->dest(), lcand->orig, rcand->orig, rcand->dest()))) { + base = connect(rcand, base->rev()); + } else { + base = connect(base->rev(), lcand->rev()); + }} + return {ldo, rdo}; +} + +vector delaunay(vector pts) { + if (sz(pts) <= 2) return {}; + sort(all(pts), [](const pt& a, const pt& b) { + if (real(a) != real(b)) return real(a) < real(b); + return imag(a) < imag(b); + }); + QuadEdge* r = rec(all(pts)).first; + vector edges = {r}; + while (cross(r->onext->dest(), r->dest(), r->orig) < 0) r = r->onext; + auto add = [&](QuadEdge* e){ + QuadEdge* cur = e; + do { + cur->used = true; + pts.push_back(cur->orig); + edges.push_back(cur->rev()); + cur = cur->lnext(); + } while (cur != e); + }; + add(r); + pts.clear(); + for (int i = 0; i < sz(edges); i++) { + if 
(!edges[i]->used) add(edges[i]);
+  }
+  return pts;
+}
diff --git a/content/geometry/formulas.cpp b/content/geometry/formulas.cpp
new file mode 100644
index 0000000..5d4e10d
--- /dev/null
+++ b/content/geometry/formulas.cpp
@@ -0,0 +1,42 @@
+// Komplexe Zahlen als Punkte. Wenn immer möglich complex
+// verwenden. Funktionen wie abs() geben dann aber ll zurück.
+using pt = complex;
+
+constexpr double PIU = acos(-1.0l); // PIL < PI < PIU
+constexpr double PIL = PIU-2e-19l;
+
+// Winkel zwischen Punkt und x-Achse in [-PI, PI].
+double angle(pt a) {return arg(a);}
+
+// rotiert Punkt gegen den Uhrzeigersinn um den Ursprung.
+pt rotate(pt a, double theta) {return a * polar(1.0, theta);}
+
+// Skalarprodukt.
+auto dot(pt a, pt b) {return real(conj(a) * b);}
+
+// abs()^2 (pre C++20)
+auto norm(pt a) {return dot(a, a);}
+
+// Kreuzprodukt, 0, falls kollinear.
+auto cross(pt a, pt b) {return imag(conj(a) * b);}
+auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);}
+
+// 1 => c links von a->b
+// 0 => a, b und c kollinear
+// -1 => c rechts von a->b
+int ccw(pt a, pt b, pt c) {
+  auto orien = cross(b - a, c - a);
+  return (orien > EPS) - (orien < -EPS);
+}
+
+// Liegt d in der gleichen Ebene wie a, b, und c?
+bool isCoplanar(pt a, pt b, pt c, pt d) {
+  return abs((b - a) * (c - a) * (d - a)) < EPS;
+}
+
+// charakterisiert Winkel zwischen Vektoren u und v
+pt uniqueAngle(pt u, pt v) {
+  pt tmp = v * conj(u);
+  ll g = abs(gcd(real(tmp), imag(tmp)));
+  return tmp / g;
+}
diff --git a/content/geometry/formulas3d.cpp b/content/geometry/formulas3d.cpp
new file mode 100644
index 0000000..dee3ce8
--- /dev/null
+++ b/content/geometry/formulas3d.cpp
@@ -0,0 +1,53 @@
+// Skalarprodukt
+auto operator|(pt3 a, pt3 b) {
+  return a.x * b.x + a.y*b.y + a.z*b.z;
+}
+auto dot(pt3 a, pt3 b) {return a|b;}
+
+// Kreuzprodukt
+pt3 operator*(pt3 a, pt3 b) {return {a.y*b.z - a.z*b.y,
+                                     a.z*b.x - a.x*b.z,
+                                     a.x*b.y - a.y*b.x};}
+pt3 cross(pt3 a, pt3 b) {return a*b;}
+
+// Länge von a
+double abs(pt3 a) {return sqrt(dot(a, a));}
+double abs(pt3 a, pt3 b) {return abs(b - a);}
+
+// Spatprodukt (mixed product)
+auto mixed(pt3 a, pt3 b, pt3 c) {return a*b|c;};
+
+// Orientierung von p zu der Ebene durch a, b, c
+// -1 => gegen den Uhrzeigersinn,
+//  0 => komplanar,
+//  1 => im Uhrzeigersinn.
+int ccw(pt3 a, pt3 b, pt3 c, pt3 p) {
+  auto orien = mixed(b - a, c - a, p - a);
+  return (orien > EPS) - (orien < -EPS);
+}
+
+// Entfernung von Punkt p zur Ebene a,b,c.
+double distToPlane(pt3 a, pt3 b, pt3 c, pt3 p) {
+  pt3 n = cross(b-a, c-a);
+  return abs(dot(n, p) - dot(n, a)) / abs(n);
+}
+
+// Liegt p in der Ebene a,b,c?
+bool pointOnPlane(pt3 a, pt3 b, pt3 c, pt3 p) {
+  return ccw(a, b, c, p) == 0;
+}
+
+// Schnittpunkt von der Geraden a-b und der Ebene c,d,e
+// die Gerade darf nicht parallel zu der Ebene sein!
+pt3 linePlaneIntersection(pt3 a, pt3 b, pt3 c, pt3 d, pt3 e) {
+  pt3 n = cross(d-c, e-c);
+  pt3 dir = b - a;
+  return a - dir * (dot(n, a) - dot(n, c)) / dot(n, dir);
+}
+
+// Abstand zwischen den Geraden a-b und c-d
+double lineLineDist(pt3 a, pt3 b, pt3 c, pt3 d) {
+  pt3 n = cross(b - a, d - c);
+  if (abs(n) < EPS) return distToLine(a, b, c);
+  return abs(dot(a - c, n)) / abs(n);
+}
diff --git a/content/geometry/geometry.tex b/content/geometry/geometry.tex
new file mode 100644
index 0000000..92285c4
--- /dev/null
+++ b/content/geometry/geometry.tex
@@ -0,0 +1,62 @@
+\section{Geometrie}
+
+\begin{algorithm}{Closest Pair}
+  \begin{methods}
+    \method{shortestDist}{kürzester Abstand zwischen Punkten}{n\*\log(n)}
+  \end{methods}
+  \sourcecode{geometry/closestPair.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Konvexe Hülle}
+  \begin{methods}
+    \method{convexHull}{berechnet konvexe Hülle}{n\*\log(n)}
+  \end{methods}
+  \begin{itemize}
+    \item konvexe Hülle gegen den Uhrzeigersinn sortiert
+    \item nur Eckpunkte enthalten (für alle Punkte = im CCW Test entfernen)
+    \item erster und letzter Punkt sind identisch
+  \end{itemize}
+  \sourcecode{geometry/convexHull.cpp}
+\end{algorithm}
+
+\begin{algorithm}{Rotating calipers}
+  \begin{methods}
+    \method{antipodalPoints}{berechnet antipodale Punkte}{n}
+  \end{methods}
+  \textbf{WICHTIG:} Punkte müssen gegen den Uhrzeigersinn sortiert sein und konvexes Polygon bilden!
+  \sourcecode{geometry/antipodalPoints.cpp}
+\end{algorithm}
+
+\subsection{Formeln~~--~\texttt{std::complex}}
+\sourcecode{geometry/formulas.cpp}
+\sourcecode{geometry/linesAndSegments.cpp}
+\sourcecode{geometry/sortAround.cpp}
+\input{geometry/triangle}
+\sourcecode{geometry/triangle.cpp}
+\sourcecode{geometry/polygon.cpp}
+\sourcecode{geometry/circle.cpp}
+
+\subsection{Formeln -- 3D}
+\sourcecode{geometry/formulas3d.cpp}
+
+\optional{
+  \subsection{3D-Kugeln}
+  \sourcecode{geometry/spheres.cpp}
+}
+
+\begin{algorithm}{Half-plane intersection}
+  \sourcecode{geometry/hpi.cpp}
+\end{algorithm}
+
+\begin{algorithm}[optional]{Delaunay Triangulierung}
+  \begin{methods}
+    \method{delaunay}{berechnet Triangulierung}{n\*\log(n)}
+  \end{methods}
+  \textbf{WICHTIG:} Wenn alle Punkte kollinear sind, gibt es keine Triangulierung! Wenn 4 Punkte auf einem Kreis liegen, ist die Triangulierung nicht eindeutig.
+  \sourcecode{geometry/delaunay.cpp}
+\end{algorithm}
+
+\optional{
+\subsection{Geraden}
+\sourcecode{geometry/lines.cpp}
+}
diff --git a/content/geometry/hpi.cpp b/content/geometry/hpi.cpp
new file mode 100644
index 0000000..3509e0e
--- /dev/null
+++ b/content/geometry/hpi.cpp
@@ -0,0 +1,68 @@
+constexpr ll inf = 0x1FFF'FFFF'FFFF'FFFF;//THIS CODE IS WIP
+
+bool left(pt p) {return real(p) < 0 ||
+                 (real(p) == 0 && imag(p) < 0);}
+struct hp {
+  pt from, to;
+
+  hp(pt a, pt b) : from(a), to(b) {}
+  hp(pt dummy) : hp(dummy, dummy) {}
+
+  bool dummy() const {return from == to;}
+  pt dir() const {return dummy() ?
to : to - from;} + bool operator<(const hp& o) const { + if (left(dir()) != left(o.dir())) + return left(dir()) > left(o.dir()); + return cross(dir(), o.dir()) > 0; + } + + using lll = __int128; + using ptl = complex; + ptl mul(lll m, ptl p) const {return m*p;}//ensure 128bit + + bool check(const hp& a, const hp& b) const { + if (dummy() || b.dummy()) return false; + if (a.dummy()) { + ll ort = sgn(cross(b.dir(), dir())); + if (ort == 0) return cross(from, to, a.from) < 0; + return cross(b.dir(), a.dir()) * ort > 0; + } + ll y = cross(a.dir(), b.dir()); + ll z = cross(b.from - a.from, b.dir()); + ptl i = mul(y, a.from) + mul(z, a.dir()); //intersect a and b + // check if i is outside/right of x + return imag(conj(mul(sgn(y),dir()))*(i-mul(y,from))) < 0; + } +}; + +constexpr ll lim = 2e9+7; + +deque intersect(vector hps) { + hps.push_back(hp(pt{lim+1,-1})); + hps.push_back(hp(pt{lim+1,1})); + sort(all(hps)); + + deque dq = {hp(pt{-lim, 1})}; + for (auto x : hps) { + while (sz(dq) > 1 && x.check(dq.end()[-1], dq.end()[-2])) + dq.pop_back(); + while (sz(dq) > 1 && x.check(dq[0], dq[1])) + dq.pop_front(); + + if (cross(x.dir(), dq.back().dir()) == 0) { + if (dot(x.dir(), dq.back().dir()) < 0) return {}; + if (cross(x.from, x.to, dq.back().from) < 0) + dq.pop_back(); + else continue; + } + dq.push_back(x); + } + + while (sz(dq) > 2 && dq[0].check(dq.end()[-1], dq.end()[-2])) + dq.pop_back(); + while (sz(dq) > 2 && dq.end()[-1].check(dq[0], dq[1])) + dq.pop_front(); + + if (sz(dq) < 3) return {}; + return dq; +} diff --git a/content/geometry/lines.cpp b/content/geometry/lines.cpp new file mode 100644 index 0000000..95536a4 --- /dev/null +++ b/content/geometry/lines.cpp @@ -0,0 +1,33 @@ +struct line { + double a, b, c; // ax + by + c = 0; vertikale Line: b = 0, sonst: b = 1 + line(pt p, pt q) : a(-imag(q-p)), b(real(q-p)), c(cross({b, -a},p)) {} +}; + +line pointsToLine(pt p1, pt p2) { + line l; + if (abs(real(p1 - p2)) < EPS) { + l.a = 1; l.b = 0.0; l.c = -real(p1); + } else { + l.a = -imag(p1 - p2) / real(p1 - p2); + l.b = 1.0; + l.c = -(l.a * real(p1)) - imag(p1); + } + return l; +} + +bool parallel(line l1, line l2) { + return (abs(l1.a - l2.a) < EPS) && (abs(l1.b - l2.b) < EPS); +} + +bool same(line l1, line l2) { + return parallel(l1, l2) && (abs(l1.c - l2.c) < EPS); +} + +bool intersect(line l1, line l2, pt& p) { + if (parallel(l1, l2)) return false; + double y, x = (l2.b * l1.c - l1.b * l2.c) / (l2.a * l1.b - l1.a * l2.b); + if (abs(l1.b) > EPS) y = -(l1.a * x + l1.c); + else y = -(l2.a * x + l2.c); + p = {x, y}; + return true; +} diff --git a/content/geometry/linesAndSegments.cpp b/content/geometry/linesAndSegments.cpp new file mode 100644 index 0000000..1e21cba --- /dev/null +++ b/content/geometry/linesAndSegments.cpp @@ -0,0 +1,89 @@ +// Liegt p auf der Geraden a-b? 2d und 3d +bool pointOnLine(pt a, pt b, pt p) { + return ccw(a, b, p) == 0; +} + +// Test auf Linienschnitt zwischen a-b und c-d. (nicht identisch) +bool lineIntersection(pt a, pt b, pt c, pt d) { + return abs(cross(a - b, c - d)) < EPS; +} + +// Berechnet den Schnittpunkt der Graden a-b und c-d. +// die Graden dürfen nicht parallel sein! +pt lineIntersection2(pt a, pt b, pt c, pt d) { + double x = cross(b - a, d - c); + double y = cross(c - a, d - c); + return a + y/x*(b - a); +} + +// Entfernung von Punkt p zur Geraden durch a-b. 
2d und 3d +double distToLine(pt a, pt b, pt p) { + return abs(cross(p - a, b - a)) / abs(b - a); +} + +// Projiziert p auf die Gerade a-b +pt projectToLine(pt a, pt b, pt p) { + return a + (b - a) * dot(p - a, b - a) / norm(b - a); +} + +// sortiert alle Punkte pts auf einer Linie entsprechend dir +void sortLine(pt dir, vector& pts) { // (2d und 3d) + sort(all(pts), [&](pt a, pt b){ + return dot(dir, a) < dot(dir, b); + }); +} + +// Liegt p auf der Strecke a-b? (nutze < für inberhalb) +bool pointOnSegment(pt a, pt b, pt p) { + if (ccw(a, b, p) != 0) return false; + auto dist = norm(a - b); + return norm(a - p) <= dist && norm(b - p) <= dist; +} + +// Entfernung von Punkt p zur Strecke a-b. +double distToSegment(pt a, pt b, pt p) { + if (a == b) return abs(p - a); + if (dot(p - a, b - a) <= 0) return abs(p - a); + if (dot(p - b, b - a) >= 0) return abs(p - b); + return distToLine(a, b, p); +} + +// Test auf Streckenschnitt zwischen a-b und c-d. +bool segmentIntersection(pt a, pt b, pt c, pt d) { + if (ccw(a, b, c) == 0 && ccw(a, b, d) == 0) + return pointOnSegment(a,b,c) || + pointOnSegment(a,b,d) || + pointOnSegment(c,d,a) || + pointOnSegment(c,d,b); + return ccw(a, b, c) * ccw(a, b, d) <= 0 && + ccw(c, d, a) * ccw(c, d, b) <= 0; +} + +// Berechnet die Schnittpunkte der Strecken a-b und c-d. +// Enthält entweder keinen Punkt, den einzigen Schnittpunkt +// oder die Endpunkte der Schnittstrecke. +vector segmentIntersection2(pt a, pt b, pt c, pt d) { + double x = cross(b - a, d - c); + double y = cross(c - a, d - c); + double z = cross(b - a, a - c); + if (x < 0) {x = -x; y = -y; z = -z;} + if (y < -EPS || y-x > EPS || z < -EPS || z-x > EPS) return {}; + if (x > EPS) return {a + y/x*(b - a)}; + vector result; + auto insertUnique = [&](pt p) { + for (auto q : result) if (abs(p - q) < EPS) return; + result.push_back(p); + }; + if (dot(c-a, d-a) < EPS) insertUnique(a); + if (dot(c-b, d-b) < EPS) insertUnique(b); + if (dot(a-c, b-c) < EPS) insertUnique(c); + if (dot(a-d, b-d) < EPS) insertUnique(d); + return result; +} + +// Kürzeste Entfernung zwischen den Strecken a-b und c-d. +double distBetweenSegments(pt a, pt b, pt c, pt d) { + if (segmentIntersection(a, b, c, d)) return 0.0; + return min({distToSegment(a, b, c), distToSegment(a, b, d), + distToSegment(c, d, a), distToSegment(c, d, b)}); +} diff --git a/content/geometry/polygon.cpp b/content/geometry/polygon.cpp new file mode 100644 index 0000000..3178290 --- /dev/null +++ b/content/geometry/polygon.cpp @@ -0,0 +1,150 @@ +// Flächeninhalt eines Polygons (nicht selbstschneidend). +// Punkte gegen den Uhrzeigersinn: positiv, sonst negativ. +double area(const vector& poly) { //poly[0] == poly.back() + ll res = 0; + for (int i = 0; i + 1 < sz(poly); i++) + res += cross(poly[i], poly[i + 1]); + return 0.5 * res; +} + +// Anzahl ccw drehungen einer Polyline um einen Punkt +// p nicht auf rand und poly[0] == poly.back() +// res != 0 or (res & 1) != 0 um inside zu prüfen bei +// selbstschneidenden Polygonen (definitions Sache) +ll windingNumber(pt p, const vector& poly) { + ll res = 0; + for (int i = 0; i + 1 < sz(poly); i++) { + pt a = poly[i], b = poly[i + 1]; + if (real(a) > real(b)) swap(a, b); + if (real(a) <= real(p) && real(p) < real(b) && + cross(p, a, b) < 0) { + res += ccw(p, poly[i], poly[i + 1]); + }} + return res; +} + +// Testet, ob ein Punkt im Polygon liegt (beliebige Polygone). 
+// Ändere Zeile 32 falls rand zählt, poly[0] == poly.back() +bool inside(pt p, const vector& poly) { + bool in = false; + for (int i = 0; i + 1 < sz(poly); i++) { + pt a = poly[i], b = poly[i + 1]; + if (pointOnLineSegment(a, b, p)) return false; + if (real(a) > real(b)) swap(a,b); + if (real(a) <= real(p) && real(p) < real(b) && + cross(p, a, b) < 0) { + in ^= 1; + }} + return in; +} + +// convex hull without duplicates, h[0] != h.back() +// apply comments if border counts as inside +bool insideConvex(pt p, const vector& hull) { + int l = 0, r = sz(hull) - 1; + if (cross(hull[0], hull[r], p) >= 0) return false; // > 0 + while (l + 1 < r) { + int m = (l + r) / 2; + if (cross(hull[0], hull[m], p) > 0) l = m; // >= 0 + else r = m; + } + return cross(hull[l], hull[r], p) > 0; // >= 0 +} + +void rotateMin(vector& hull) { + auto mi = min_element(all(hull), [](const pt& a, const pt& b){ + return real(a) == real(b) ? imag(a) < imag(b) + : real(a) < real(b); + }); + rotate(hull.begin(), mi, hull.end()); +} + +// convex hulls without duplicates, h[0] != h.back() +vector minkowski(vector ps, vector qs) { + rotateMin(ps); + rotateMin(qs); + ps.push_back(ps[0]); + qs.push_back(qs[0]); + ps.push_back(ps[1]); + qs.push_back(qs[1]); + vector res; + for (ll i = 0, j = 0; i + 2 < sz(ps) || j + 2 < sz(qs);) { + res.push_back(ps[i] + qs[j]); + auto c = cross(ps[i + 1] - ps[i], qs[j + 1] - qs[j]); + if(c >= 0) i++; + if(c <= 0) j++; + } + return res; +} + +// convex hulls without duplicates, h[0] != h.back() +double dist(const vector& ps, vector qs) { + for (pt& q : qs) q *= -1; + auto p = minkowski(ps, qs); + p.push_back(p[0]); + double res = INF; + bool intersect = true; + for (ll i = 0; i + 1 < sz(p); i++) { + intersect &= cross(p[i], p[i+1]) >= 0; + res = min(res, distToSegment(p[i], p[i+1], 0)); + } + return intersect ? 
0 : res; +} + +bool left(pt of, pt p) {return cross(p, of) < 0 || + (cross(p, of) == 0 && dot(p, of) > 0);} + +// convex hulls without duplicates, hull[0] == hull.back() and +// hull[0] must be a convex point (with angle < pi) +// returns index of corner where dot(dir, corner) is maximized +int extremal(const vector& hull, pt dir) { + dir *= pt(0, 1); + int l = 0, r = sz(hull) - 1; + while (l + 1 < r) { + int m = (l + r) / 2; + pt dm = hull[m+1]-hull[m]; + pt dl = hull[l+1]-hull[l]; + if (left(dl, dir) != left(dl, dm)) { + if (left(dl, dm)) l = m; + else r = m; + } else { + if (cross(dir, dm) < 0) l = m; + else r = m; + }} + return r % (sz(hull) - 1); +} + +// convex hulls without duplicates, hull[0] == hull.back() and +// hull[0] must be a convex point (with angle < pi) +// {} if no intersection +// {x} if corner is only intersection +// {i, j} segments (i,i+1) and (j,j+1) intersected (if only the +// border is intersected corners i and j are the start and end) +vector intersectLine(const vector& hull, pt a, pt b) { + int endA = extremal(hull, (a-b) * pt(0, 1)); + int endB = extremal(hull, (b-a) * pt(0, 1)); + // cross == 0 => line only intersects border + if (cross(hull[endA], a, b) > 0 || + cross(hull[endB], a, b) < 0) return {}; + + int n = sz(hull) - 1; + vector res; + for (auto _ : {0, 1}) { + int l = endA, r = endB; + if (r < l) r += n; + while (l + 1 < r) { + int m = (l + r) / 2; + if (cross(hull[m % n], a, b) <= 0 && + cross(hull[m % n], a, b) != cross(hull[endB], a, b)) + l = m; + else r = m; + } + if (cross(hull[r % n], a, b) == 0) l++; + res.push_back(l % n); + swap(endA, endB); + swap(a, b); + } + if (res[0] == res[1]) res.pop_back(); + return res; +} + diff --git a/content/geometry/segmentIntersection.cpp b/content/geometry/segmentIntersection.cpp new file mode 100644 index 0000000..4262ddc --- /dev/null +++ b/content/geometry/segmentIntersection.cpp @@ -0,0 +1,63 @@ +struct seg { + pt a, b; + int id; + bool operator<(const seg& o) const { + if (real(a) < real(o.a)) { + int s = ccw(a, b, o.a); + return (s > 0 || (s == 0 && imag(a) < imag(o.a))); + } else if (real(a) > real(o.a)) { + int s = ccw(o.a, o.b, a); + return (s < 0 || (s == 0 && imag(a) < imag(o.a))); + } + return imag(a) < imag(o.a); + } +}; + +struct event { + pt p; + int id, type; + bool operator<(const event& o) const { + if (real(p) != real(o.p)) return real(p) < real(o.p); + if (type != o.type) return type > o.type; + return imag(p) < imag(o.p); + } +}; + +bool lessPT(const pt& a, const pt& b) { + return real(a) != real(b) ? 
real(a) < real(b) + : imag(a) < imag(b); +} + +bool intersect(const seg& a, const seg& b) { + return lineSegmentIntersection(a.a, a.b, b.a, b.b); +} + +pair intersect(vector& segs) { + vector events; + for (seg& s : segs) { + if (lessPT(s.b, s.a)) swap(s.b, s.a); + events.push_back({s.a, s.id, 1}); + events.push_back({s.b, s.id, -1}); + } + sort(all(events)); + + set q; + vector::iterator> where(sz(segs)); + for (auto e : events) { + int id = e.id; + if (e.type > 0) { + auto it = q.lower_bound(segs[id]); + if (it != q.end() && intersect(*it, segs[id])) + return {it->id, segs[id].id}; + if (it != q.begin() && intersect(*prev(it), segs[id])) + return {prev(it)->id, segs[id].id}; + where[id] = q.insert(it, segs[id]); + } else { + auto it = where[id]; + if (it != q.begin() && next(it) != q.end() && intersect(*next(it), *prev(it))) + return {next(it)->id, prev(it)->id}; + q.erase(it); + } + } + return {-1, -1}; +} diff --git a/content/geometry/sortAround.cpp b/content/geometry/sortAround.cpp new file mode 100644 index 0000000..98d17a8 --- /dev/null +++ b/content/geometry/sortAround.cpp @@ -0,0 +1,11 @@ +bool left(pt p) {return real(p) < 0 || + (real(p) == 0 && imag(p) < 0);} + +// counter clockwise, starting with "11:59" +void sortAround(pt p, vector& ps) { + sort(all(ps), [&](const pt& a, const pt& b){ + if (left(a - p) != left(b - p)) + return left(a - p) > left(b - p); + return cross(p, a, b) > 0; + }); +} diff --git a/content/geometry/spheres.cpp b/content/geometry/spheres.cpp new file mode 100644 index 0000000..ec22262 --- /dev/null +++ b/content/geometry/spheres.cpp @@ -0,0 +1,29 @@ +// Great Circle Distance mit Längen- und Breitengrad. +double gcDist(double pLat, double pLon, + double qLat, double qLon, double radius) { + pLat *= PI / 180; pLon *= PI / 180; + qLat *= PI / 180; qLon *= PI / 180; + return radius * acos(cos(pLat) * cos(pLon) * + cos(qLat) * cos(qLon) + + cos(pLat) * sin(pLon) * + cos(qLat) * sin(qLon) + + sin(pLat) * sin(qLat)); +} + +// Great Circle Distance mit kartesischen Koordinaten. +double gcDist(point p, point q) { + return acos(p.x * q.x + p.y * q.y + p.z * q.z); +} + +// 3D Punkt in kartesischen Koordinaten. +struct point{ + double x, y, z; + point() {} + point(double x, double y, double z) : x(x), y(y), z(z) {} + point(double lat, double lon) { + lat *= PI / 180.0; lon *= PI / 180.0; + x = cos(lat) * sin(lon); + y = cos(lat) * cos(lon); + z = sin(lat); + } +}; diff --git a/content/geometry/triangle.cpp b/content/geometry/triangle.cpp new file mode 100644 index 0000000..534bb10 --- /dev/null +++ b/content/geometry/triangle.cpp @@ -0,0 +1,43 @@ +// Mittelpunkt des Dreiecks abc. +pt centroid(pt a, pt b, pt c) {return (a + b + c) / 3.0;} + +// Flächeninhalt eines Dreicks bei bekannten Eckpunkten. +double area(pt a, pt b, pt c) { + return abs(cross(a, b, c)) / 2.0; +} + +// Flächeninhalt eines Dreiecks bei bekannten Seitenlängen. 
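+// Worked example for Heron's formula below (sketch): a 3-4-5 right
+// triangle has s = 6 and area sqrt(6*3*2*1) = 6:
+//   area(3.0, 4.0, 5.0); // -> 6.0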
+double area(double a, double b, double c) { + double s = (a + b + c) / 2.0; //unpräzise + return sqrt(s * (s-a) * (s-b) * (s-c)); +} + +// Zentrum des größten Kreises im Dreiecke +pt inCenter(pt a, pt b, pt c) { + double x = abs(a-b), y = abs(b-c), z = abs(a-c); + return (y*a + z*b + x*c) / (x+y+z); +} + +// Zentrum des Kreises durch alle Eckpunkte +// a, b und c nicht kollinear +pt circumCenter(pt a, pt b, pt c) { + b -= a, c -= a; + pt d = b * norm(c) - c * norm(b); + d = {-d.imag(), d.real()}; + return a + d / cross(b, c) / 2.0; +} + +// -1 => p außerhalb Kreis durch a,b,c +// 0 => p auf Kreis durch a,b,c +// 1 => p im Kreis durch a,b,c +int insideOutCenter(pt a, pt b, pt c, pt p) {// braucht lll + return ccw(a,b,c) * sgn(imag((c-b)*conj(p-c)*(a-p)*conj(b-a))); +} + +// Sind die Dreiecke a1, b1, c1, and a2, b2, c2 ähnlich? +// Erste Zeile testet Ähnlichkeit mit gleicher Orientierung, +// zweite Zeile testet Ähnlichkeit mit verschiedener Orientierung +bool similar(pt a1, pt b1, pt c1, pt a2, pt b2, pt c2) { + return (b2-a2) * (c1-a1) == (b1-a1) * (c2-a2) || + (b2-a2) * conj(c1-a1) == conj(b1-a1) * (c2-a2); +} diff --git a/content/geometry/triangle.tex b/content/geometry/triangle.tex new file mode 100644 index 0000000..3decd54 --- /dev/null +++ b/content/geometry/triangle.tex @@ -0,0 +1,41 @@ + +\begin{minipage}[T]{0.27\linewidth} + Generell: + \begin{itemize} + \item $\cos(\gamma)=\frac{a^2+b^2-c^2}{2ab}$ + \item $b=\frac{a}{\sin(\alpha)}\sin(\beta)$ + %\item $b=\frac{a}{\sin(\pi-\beta-\gamma)}\sin(\beta)$ + %\item $\sin(\beta)=\frac{b\sin(\alpha)}{a}$ %asin is not uniquely invertible + \item $\Delta=\frac{bc}{2}\sin(\alpha)$ + \end{itemize} +\end{minipage} +\hfill +\begin{minipage}[B]{0.5\linewidth} + \centering + \begin{tikzpicture}[line cap=round,minimum size=0,x=.7cm,y=0.7cm] + \node[circle,inner sep=0] (AA) at (0,0) {$A$}; + \node[circle,inner sep=0] (BB) at (3,-1) {$B$}; + \node[circle,inner sep=0] (CC) at (3.666667,1) {$C$}; + + \coordinate (A) at (AA.0); + \coordinate (B) at (BB.100); + \coordinate (C) at (CC.210); + + \pic[draw,angle radius=15,pic text=$\gamma$]{angle = A--C--B}; + \pic[draw,angle radius=15,pic text=$\beta$]{angle = C--B--A}; + \pic[draw,angle radius=20,pic text=$\alpha$]{angle = B--A--C}; + + \draw (A) to[edge label={$b$},inner sep=1] (C); + \draw (A) to[edge label'={$c$},inner sep=1.3] (B); + \draw (B) to[edge label'={$a$},inner sep=0.6] (C); + \end{tikzpicture} +\end{minipage} +\hfill +\begin{minipage}[T]{0.16\linewidth} + $\beta=90^\circ$: + \begin{itemize} + \item $\sin(\alpha)=\frac{a}{b}$ + \item $\cos(\alpha)=\frac{c}{b}$ + \item $\tan(\alpha)=\frac{a}{c}$ + \end{itemize} +\end{minipage} diff --git a/content/graph/2sat.cpp b/content/graph/2sat.cpp new file mode 100644 index 0000000..75e54e6 --- /dev/null +++ b/content/graph/2sat.cpp @@ -0,0 +1,31 @@ +struct sat2 { + int n; // + scc variablen + vector sol; + + sat2(int vars) : n(vars*2), adj(n) {} + + static int var(int i) {return i << 1;} // use this! 
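+  // Usage sketch (illustrative): the literal of variable i is var(i), its
+  // negation is var(i)^1; e.g. (x0 or !x1) and (x1 xor x2):
+  //   sat2 s(3);
+  //   s.addOr(sat2::var(0), sat2::var(1) ^ 1);
+  //   s.addXor(sat2::var(1), sat2::var(2));
+  //   if (s.solve()) { bool x0 = s.sol[sat2::var(0)]; }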
+ + void addImpl(int a, int b) { + adj[a].push_back(b); + adj[1^b].push_back(1^a); + } + void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);} + void addOr(int a, int b) {addImpl(1^a, b);} + void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);} + void addTrue(int a) {addImpl(1^a, a);} + void addFalse(int a) {addTrue(1^a);} + void addAnd(int a, int b) {addTrue(a); addTrue(b);} + void addNand(int a, int b) {addOr(1^a, 1^b);} + + bool solve() { + scc(); //scc code von oben + sol.assign(n, -1); + for (int i = 0; i < n; i += 2) { + if (idx[i] == idx[i + 1]) return false; + sol[i] = idx[i] < idx[i + 1]; + sol[i + 1] = !sol[i]; + } + return true; + } +}; diff --git a/content/graph/LCA_sparse.cpp b/content/graph/LCA_sparse.cpp new file mode 100644 index 0000000..221b5ed --- /dev/null +++ b/content/graph/LCA_sparse.cpp @@ -0,0 +1,32 @@ +struct LCA { + vector depth; + vector visited, first; + int idx; + SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@ + + void init(vector>& adj, int root) { + depth.assign(2 * sz(adj), 0); + visited.assign(2 * sz(adj), -1); + first.assign(sz(adj), 2 * sz(adj)); + idx = 0; + dfs(adj, root); + st.init(&depth); + } + + void dfs(vector>& adj, int v, ll d=0) { + visited[idx] = v, depth[idx] = d; + first[v] = min(idx, first[v]), idx++; + + for (int u : adj[v]) { + if (first[u] == 2 * sz(adj)) { + dfs(adj, u, d + 1); + visited[idx] = v, depth[idx] = d, idx++; + }}} + + int getLCA(int u, int v) { + if (first[u] > first[v]) swap(u, v); + return visited[st.queryIdempotent(first[u], first[v] + 1)]; + } + + ll getDepth(int v) {return depth[first[v]];} +}; diff --git a/content/graph/TSP.cpp b/content/graph/TSP.cpp new file mode 100644 index 0000000..6223858 --- /dev/null +++ b/content/graph/TSP.cpp @@ -0,0 +1,29 @@ +vector> dist; // Entfernung zwischen je zwei Punkten. + +auto TSP() { + int n = sz(dist), m = 1 << n; + vector> dp(n, vector(m, edge{INF, -1})); + + for (int c = 0; c < n; c++) + dp[c][m-1].dist = dist[c][0], dp[c][m-1].to = 0; + + for (int v = m - 2; v >= 0; v--) { + for (int c = n - 1; c >= 0; c--) { + for (int g = 0; g < n; g++) { + if (g != c && !((1 << g) & v)) { + if ((dp[g][(v | (1 << g))].dist + dist[c][g]) < + dp[c][v].dist) { + dp[c][v].dist = + dp[g][(v | (1 << g))].dist + dist[c][g]; + dp[c][v].to = g; + }}}}} + // return dp[0][1]; // Länge der Tour + + vector tour = {0}; + int v = 0; + while (tour.back() != 0 || sz(tour) == 1) + tour.push_back(dp[tour.back()] + [(v |= (1 << tour.back()))].to); + // Enthält Knoten 0 zweimal. An erster und letzter Position. 
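+  // (Reconstruction above: dp[c][v].to is the best successor of city c
+  //  when v is the bitmask of cities visited so far.)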
+ return tour; +} diff --git a/content/graph/articulationPoints.cpp b/content/graph/articulationPoints.cpp new file mode 100644 index 0000000..25ff67e --- /dev/null +++ b/content/graph/articulationPoints.cpp @@ -0,0 +1,43 @@ +vector> adj; +vector num; +int counter, rootCount, root; +vector isArt; +vector bridges, st; +vector> bcc; + +int dfs(int v, int from = -1) { + int me = num[v] = ++counter, top = me; + for (Edge& e : adj[v]) { + if (e.id == from) continue; + if (num[e.to]) { + top = min(top, num[e.to]); + if (num[e.to] < me) st.push_back(e); + } else { + if (v == root) rootCount++; + int si = sz(st); + int up = dfs(e.to, e.id); + top = min(top, up); + if (up >= me) isArt[v] = true; + if (up > me) bridges.push_back(e); + if (up <= me) st.push_back(e); + if (up == me) { + bcc.emplace_back(si + all(st)); + st.resize(si); + }}} + return top; +} + +void find() { + counter = 0; + num.assign(sz(adj), 0); + isArt.assign(sz(adj), false); + bridges.clear(); + st.clear(); + bcc.clear(); + for (int v = 0; v < sz(adj); v++) { + if (!num[v]) { + root = v; + rootCount = 0; + dfs(v); + isArt[v] = rootCount > 1; +}}} diff --git a/content/graph/bellmannFord.cpp b/content/graph/bellmannFord.cpp new file mode 100644 index 0000000..09ea1aa --- /dev/null +++ b/content/graph/bellmannFord.cpp @@ -0,0 +1,19 @@ +auto bellmannFord(int n, vector& edges, int start) { + vector dist(n, INF), prev(n, -1); + dist[start] = 0; + + for (int i = 1; i < n; i++) { + for (edge& e : edges) { + if (dist[e.from] != INF && + dist[e.from] + e.cost < dist[e.to]) { + dist[e.to] = dist[e.from] + e.cost; + prev[e.to] = e.from; + }}} + + for (edge& e : edges) { + if (dist[e.from] != INF && + dist[e.from] + e.cost < dist[e.to]) { + // Negativer Kreis gefunden. + }} + return dist; //return prev? +} diff --git a/content/graph/bitonicTSP.cpp b/content/graph/bitonicTSP.cpp new file mode 100644 index 0000000..6470232 --- /dev/null +++ b/content/graph/bitonicTSP.cpp @@ -0,0 +1,31 @@ +vector> dist; // Initialisiere mit Entfernungen zwischen Punkten. + +auto bitonicTSP() { + vector dp(sz(dist), HUGE_VAL); + vector pre(sz(dist)); // nur für Tour + dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0; + for (unsigned int i = 2; i < sz(dist); i++) { + double link = 0; + for (int j = i - 2; j >= 0; j--) { + link += dist[j + 1][j + 2]; + double opt = link + dist[j][i] + dp[j + 1] - dist[j][j + 1]; + if (opt < dp[i]) { + dp[i] = opt; + pre[i] = j; + }}} + // return dp.back(); // Länger der Tour + + int j, n = sz(dist) - 1; + vector ut, lt = {n, n - 1}; + do { + j = pre[n]; + (lt.back() == n ? lt : ut).push_back(j); + for (int i = n - 1; i > j + 1; i--) { + (lt.back() == i ? lt : ut).push_back(i - 1); + } + } while(n = j + 1, j > 0); + (lt.back() == 1 ? lt : ut).push_back(0); + reverse(all(lt)); + lt.insert(lt.end(), all(ut)); + return lt;// Enthält Knoten 0 zweimal. An erster und letzter Position. +} diff --git a/content/graph/bitonicTSPsimple.cpp b/content/graph/bitonicTSPsimple.cpp new file mode 100644 index 0000000..8b6e6c5 --- /dev/null +++ b/content/graph/bitonicTSPsimple.cpp @@ -0,0 +1,27 @@ +vector> dist; // Entfernungen zwischen Punkten. 
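+// DP sketch (assumes points are sorted by x-coordinate): get(p1, p2) is the
+// cheapest way to append the remaining points max(p1,p2)+1 .. n-1 to two
+// monotone chains that currently end in p1 and p2.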
+vector> dp; + +double get(int p1, int p2) { + int v = max(p1, p2) + 1; + if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; + if (dp[p1][p2] >= 0.0) return dp[p1][p2]; + double tryLR = dist[p1][v] + get(v, p2); + double tryRL = dist[p2][v] + get(p1, v); + return dp[p1][p2] = min(tryLR, tryRL); +} + +auto bitonicTSP() { + dp = vector>(sz(dist), + vector(sz(dist), -1)); + get(0, 0); + // return dp[0][0]; // Länger der Tour + vector lr = {0}, rl = {0}; + for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) { + if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { + lr.push_back(v); p1 = v; + } else { + rl.push_back(v); p2 = v; + }} + lr.insert(lr.end(), rl.rbegin(), rl.rend()); + return lr;// Enthält Knoten 0 zweimal. An erster und letzter Position. +} diff --git a/content/graph/blossom.cpp b/content/graph/blossom.cpp new file mode 100644 index 0000000..7bd494a --- /dev/null +++ b/content/graph/blossom.cpp @@ -0,0 +1,82 @@ +struct GM { + vector> adj; + // pairs ist der gematchte knoten oder n + vector pairs, first, que; + vector> label; + int head, tail; + + GM(int n) : adj(n), pairs(n + 1, n), first(n + 1, n), + que(n), label(n + 1, {-1, -1}) {} + + void rematch(int u, int v) { + int t = pairs[u]; pairs[u] = v; + if (pairs[t] != u) return; + if (label[u].second == -1) { + pairs[t] = label[u].first; + rematch(pairs[t], t); + } else { + auto [x, y] = label[u]; + rematch(x, y); + rematch(y, x); + }} + + int findFirst(int v) { + return label[first[v]].first < 0 ? first[v] + : first[v] = findFirst(first[v]); + } + + void relabel(int x, int y) { + int r = findFirst(x); + int s = findFirst(y); + if (r == s) return; + auto h = label[r] = label[s] = {~x, y}; + int join; + while (true) { + if (s != sz(adj)) swap(r, s); + r = findFirst(label[pairs[r]].first); + if (label[r] == h) { + join = r; + break; + } else { + label[r] = h; + }} + for (int v : {first[x], first[y]}) { + for (; v != join; v = first[label[pairs[v]].first]) { + label[v] = {x, y}; + first[v] = join; + que[tail++] = v; + }}} + + bool augment(int v) { + label[v] = {sz(adj), -1}; + first[v] = sz(adj); + head = tail = 0; + for (que[tail++] = v; head < tail;) { + int x = que[head++]; + for (int y : adj[x]) { + if (pairs[y] == sz(adj) && y != v) { + pairs[y] = x; + rematch(x, y); + return true; + } else if (label[y].first >= 0) { + relabel(x, y); + } else if (label[pairs[y]].first == -1) { + label[pairs[y]].first = x; + first[pairs[y]] = y; + que[tail++] = pairs[y]; + }}} + return false; + } + + int match() { + int matching = head = tail = 0; + for (int v = 0; v < sz(adj); v++) { + if (pairs[v] < sz(adj) || !augment(v)) continue; + matching++; + for (int i = 0; i < tail; i++) + label[que[i]] = label[pairs[que[i]]] = {-1, -1}; + label[sz(adj)] = {-1, -1}; + } + return matching; + } +}; diff --git a/content/graph/bronKerbosch.cpp b/content/graph/bronKerbosch.cpp new file mode 100644 index 0000000..0cfcc5f --- /dev/null +++ b/content/graph/bronKerbosch.cpp @@ -0,0 +1,24 @@ +using bits = bitset<64>; +vector adj, cliques; + +void addEdge(int a, int b) { + if (a != b) adj[a][b] = adj[b][a] = 1; +} + +void bronKerboschRec(bits R, bits P, bits X) { + if (P.none() && X.none()) { + cliques.push_back(R); + } else { + int q = min(P._Find_first(), X._Find_first()); + bits cands = P & ~adj[q]; + for (int i = 0; i < sz(adj); i++) if (cands[i]) { + R[i] = 1; + bronKerboschRec(R, P & adj[i], X & adj[i]); + R[i] = P[i] = 0; + X[i] = 1; +}}} + +void bronKerbosch() { + cliques.clear(); + bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {}); +} diff --git 
a/content/graph/centroid.cpp b/content/graph/centroid.cpp new file mode 100644 index 0000000..820945b --- /dev/null +++ b/content/graph/centroid.cpp @@ -0,0 +1,21 @@ +vector s; +void dfs_sz(int v, int from = -1) { + s[v] = 1; + for (int u : adj[v]) if (u != from) { + dfs_sz(u, v); + s[v] += s[u]; +}} + +pair dfs_cent(int v, int from, int n) { + for (int u : adj[v]) if (u != from) { + if (2 * s[u] == n) return {v, u}; + if (2 * s[u] > n) return dfs_cent(u, v, n); + } + return {v, -1}; +} + +pair find_centroid(int root = 0) { + s.resize(sz(adj)); + dfs_sz(root); + return dfs_cent(root, -1, s[root]); +} diff --git a/content/graph/connect.cpp b/content/graph/connect.cpp new file mode 100644 index 0000000..ffcd6c2 --- /dev/null +++ b/content/graph/connect.cpp @@ -0,0 +1,31 @@ +struct connect { + int n; + vector> edges; + LCT lct; // min LCT @\sourceref{datastructures/LCT.cpp}@, no updates required + + connect(int n, int m) : n(n), edges(m), lct(n+m) {} + + bool connected(int u, int v) { + return lct.connected(&lct.nodes[u], &lct.nodes[v]); + } + + void addEdge(int u, int v, int id) { + lct.nodes[id + n] = LCT::Node(id + n, id + n); + edges[id] = {u, v}; + if (connected(u, v)) { + int old = lct.query(&lct.nodes[u], &lct.nodes[v]); + if (old < id) eraseEdge(old); + } + if (!connected(u, v)) { + lct.link(&lct.nodes[u], &lct.nodes[id + n]); + lct.link(&lct.nodes[v], &lct.nodes[id + n]); + }} + + void eraseEdge(ll id) { + if (connected(edges[id].first, edges[id].second) && + lct.query(&lct.nodes[edges[id].first], + &lct.nodes[edges[id].second]) == id) { + lct.cut(&lct.nodes[edges[id].first], &lct.nodes[id + n]); + lct.cut(&lct.nodes[edges[id].second], &lct.nodes[id + n]); + }} +}; diff --git a/content/graph/cycleCounting.cpp b/content/graph/cycleCounting.cpp new file mode 100644 index 0000000..6a299ee --- /dev/null +++ b/content/graph/cycleCounting.cpp @@ -0,0 +1,64 @@ +constexpr int maxEdges = 128; +using cycle = bitset; +struct cycles { + vector>> adj; + vector seen; + vector paths, base; + vector> edges; + + cycles(int n) : adj(n), seen(n), paths(n) {} + + void addEdge(int u, int v) { + adj[u].push_back({v, sz(edges)}); + adj[v].push_back({u, sz(edges)}); + edges.push_back({u, v}); + } + + void addBase(cycle cur) { + for (cycle o : base) { + o ^= cur; + if (o._Find_first() > cur._Find_first()) cur = o; + } + if (cur.any()) base.push_back(cur); + } + + void findBase(int v, int from = -1, cycle cur = {}) { + if (from < 0 && seen[v]) return; + if (seen[v]) { + addBase(cur ^ paths[v]); + } else { + seen[v] = true; + paths[v] = cur; + for (auto [u, id] : adj[v]) { + if (u == from) continue; + cur[id].flip(); + findBase(u, v, cur); + cur[id].flip(); + }}} + + bool isCycle(cycle cur) {//cycle must be constrcuted from base + if (cur.none()) return false; + init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ + for (int i = 0; i < sz(edges); i++) { + if (cur[i]) { + cur[i] = false; + if (findSet(edges[i].first) == + findSet(edges[i].second)) break; + unionSets(edges[i].first, edges[i].second); + }} + return cur.none(); + } + + int count() { + for (int i = 0; i < sz(adj); i++) findBase(i); + assert(sz(base) < 30); + int res = 0; + for (int i = 1; i < (1 << sz(base)); i++) { + cycle cur; + for (int j = 0; j < sz(base); j++) + if (((i >> j) & 1) != 0) cur ^= base[j]; + if (isCycle(cur)) res++; + } + return res; + } +}; diff --git a/content/graph/dfs.tex b/content/graph/dfs.tex new file mode 100644 index 0000000..1e6705f --- /dev/null +++ b/content/graph/dfs.tex @@ -0,0 +1,16 @@ 
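+% Sketch (illustrative): dfs[v], fin[v] and seen[v] below are the usual
+% DFS timestamps, e.g.
+%   void dfs_ts(int v) {
+%     seen[v] = true; dfs[v] = timer++;
+%     for (int w : adj[v]) if (!seen[w]) dfs_ts(w);
+%     fin[v] = timer++;
+%   }
+% seen[w] in the table means: w was already seen when edge (v, w) was explored.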
+\begin{expandtable} +\begin{tabularx}{\linewidth}{|X|XIXIX|} + \hline + Kantentyp $(v, w)$ & \code{dfs[v] < dfs[w]} & \code{fin[v] > fin[w]} & \code{seen[w]} \\ + %$(v, w)$ & \code{dfs[w]} & \code{fin[w]} & \\ + \hline + in-tree & \code{true} & \code{true} & \code{false} \\ + \grayhline + forward & \code{true} & \code{true} & \code{true} \\ + \grayhline + backward & \code{false} & \code{false} & \code{true} \\ + \grayhline + cross & \code{false} & \code{true} & \code{true} \\ + \hline +\end{tabularx} +\end{expandtable} diff --git a/content/graph/dijkstra.cpp b/content/graph/dijkstra.cpp new file mode 100644 index 0000000..61c636d --- /dev/null +++ b/content/graph/dijkstra.cpp @@ -0,0 +1,21 @@ +using path = pair; //dist, destination + +auto dijkstra(const vector>& adj, int start) { + priority_queue, greater> pq; + vector dist(sz(adj), INF); + vector prev(sz(adj), -1); + dist[start] = 0; pq.emplace(0, start); + + while (!pq.empty()) { + auto [dv, v] = pq.top(); pq.pop(); + if (dv > dist[v]) continue; // WICHTIG! + + for (auto [du, u] : adj[v]) { + ll newDist = dv + du; + if (newDist < dist[u]) { + dist[u] = newDist; + prev[u] = v; + pq.emplace(dist[u], u); + }}} + return dist; //return prev; +} diff --git a/content/graph/dinicScaling.cpp b/content/graph/dinicScaling.cpp new file mode 100644 index 0000000..f4e833a --- /dev/null +++ b/content/graph/dinicScaling.cpp @@ -0,0 +1,51 @@ +struct Edge { + int to, rev; + ll f, c; +}; + +vector> adj; +int s, t; +vector pt, dist; + +void addEdge(int u, int v, ll c) { + adj[u].push_back({v, (int)sz(adj[v]), 0, c}); + adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); +} + +bool bfs(ll lim) { + dist.assign(sz(adj), -1); + dist[s] = 0; + queue q({s}); + while (!q.empty() && dist[t] < 0) { + int v = q.front(); q.pop(); + for (Edge& e : adj[v]) { + if (dist[e.to] < 0 && e.c - e.f >= lim) { + dist[e.to] = dist[v] + 1; + q.push(e.to); + }}} + return dist[t] >= 0; +} + +bool dfs(int v, ll flow) { + if (v == t) return true; + for (; pt[v] < sz(adj[v]); pt[v]++) { + Edge& e = adj[v][pt[v]]; + if (dist[e.to] != dist[v] + 1) continue; + if (e.c - e.f >= flow && dfs(e.to, flow)) { + e.f += flow; + adj[e.to][e.rev].f -= flow; + return true; + }} + return false; +} + +ll maxFlow(int source, int target) { + s = source, t = target; + ll flow = 0; + for (ll lim = (1LL << 62); lim >= 1; lim /= 2) { + while (bfs(lim)) { + pt.assign(sz(adj), 0); + while (dfs(s, lim)) flow += lim; + }} + return flow; +} diff --git a/content/graph/euler.cpp b/content/graph/euler.cpp new file mode 100644 index 0000000..a5ea192 --- /dev/null +++ b/content/graph/euler.cpp @@ -0,0 +1,23 @@ +vector> idx; +vector to, validIdx, cycle; +vector used; + +void addEdge(int u, int v) { + idx[u].push_back(sz(to)); + to.push_back(v); + used.push_back(false); + idx[v].push_back(sz(to)); // für ungerichtet + to.push_back(u); + used.push_back(false); +} + +void euler(int v) { // init idx und validIdx + for (;validIdx[v] < sz(idx[v]); validIdx[v]++) { + if (!used[idx[v][validIdx[v]]]) { + int u = to[idx[v][validIdx[v]]]; + used[idx[v][validIdx[v]]] = true; + used[idx[v][validIdx[v]] ^ 1] = true; // für ungerichtet + euler(u); + }} + cycle.push_back(v); // Zyklus in umgekehrter Reihenfolge. +} diff --git a/content/graph/floydWarshall.cpp b/content/graph/floydWarshall.cpp new file mode 100644 index 0000000..df096c2 --- /dev/null +++ b/content/graph/floydWarshall.cpp @@ -0,0 +1,27 @@ +vector> dist; // Entfernung zwischen je zwei Punkten. 
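+// next[i][j]: nächster Knoten nach i auf einem kürzesten Weg nach j (für getPath).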
+vector> next; + +void floydWarshall() { + next.assign(sz(dist), vector(sz(dist), -1)); + for (int i = 0; i < sz(dist); i++) { + for (int j = 0; j < sz(dist); j++) { + if (dist[i][j] < INF) { + next[i][j] = j; + }}} + + for (int k = 0; k < sz(dist); k++) { + for (int i = 0; i < sz(dist); i++) { + for (int j = 0; j < sz(dist); j++) { + // only needed if dist can be negative + if (dist[i][k] == INF || dist[k][j] == INF) continue; + if (dist[i][j] > dist[i][k] + dist[k][j]) { + dist[i][j] = dist[i][k] + dist[k][j]; + next[i][j] = next[i][k]; +}}}}} + +vector getPath(int u, int v) { + if (next[u][v] < 0) return {}; + vector path = {u}; + while (u != v) path.push_back(u = next[u][v]); + return path; //Pfad u -> v +} diff --git a/content/graph/graph.tex b/content/graph/graph.tex new file mode 100644 index 0000000..831f4e5 --- /dev/null +++ b/content/graph/graph.tex @@ -0,0 +1,269 @@ +\section{Graphen} + +\begin{algorithm}{Kruskal} + \begin{methods}[ll] + berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ + \end{methods} + \sourcecode{graph/kruskal.cpp} +\end{algorithm} + +\begin{algorithm}{Minimale Spannbäume} + \paragraph{Schnitteigenschaft} + Für jeden Schnitt $C$ im Graphen gilt: + Gibt es eine Kante $e$, die echt leichter ist als alle anderen Schnittkanten, so gehört diese zu allen minimalen Spannbäumen. + ($\Rightarrow$ Die leichteste Kante in einem Schnitt kann in einem minimalen Spannbaum verwendet werden.) + + \paragraph{Kreiseigenschaft} + Für jeden Kreis $K$ im Graphen gilt: + Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. +\end{algorithm} + +\begin{algorithm}{Heavy-Light Decomposition} + \begin{methods} + \method{get\_intervals}{gibt Zerlegung des Pfades von $u$ nach $v$}{\log(\abs{V})} + \end{methods} + \textbf{Wichtig:} Intervalle sind halboffen + + Subbaum unter dem Knoten $v$ ist das Intervall $[\text{\code{in[v]}},~\text{\code{out[v]}})$. + \sourcecode{graph/hld.cpp} +\end{algorithm} + +\begin{algorithm}{Lowest Common Ancestor} + \begin{methods} + \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} + \method{getLCA}{findet LCA}{1} + \method{getDepth}{berechnet Distanz zur Wurzel im DFS-Baum}{1} + \end{methods} + \sourcecode{graph/LCA_sparse.cpp} +\end{algorithm} + +\begin{algorithm}{Centroids} + \begin{methods} + \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} + \end{methods} + \sourcecode{graph/centroid.cpp} +\end{algorithm} + +\begin{algorithm}{Eulertouren} + \begin{methods} + \method{euler}{berechnet den Kreis}{\abs{V}+\abs{E}} + \end{methods} + \sourcecode{graph/euler.cpp} + \begin{itemize} + \item Zyklus existiert, wenn jeder Knoten geraden Grad hat (ungerichtet),\\ bei jedem Knoten Ein- und Ausgangsgrad übereinstimmen (gerichtet). + \item Pfad existiert, wenn genau $\{0, 2\}$ Knoten ungeraden Grad haben (ungerichtet),\\ bei allen Knoten Ein- und Ausgangsgrad übereinstimmen oder einer eine Ausgangskante mehr hat (Startknoten) und einer eine Eingangskante mehr hat (Endknoten). + \item \textbf{Je nach Aufgabenstellung überprüfen, wie ein unzusammenhängender Graph interpretiert werden sollen.} + \item Wenn eine bestimmte Sortierung verlangt wird oder Laufzeit vernachlässigbar ist, ist eine Implementierung mit einem \code{vector> adj} leichter + \item \textbf{Wichtig:} Algorithmus schlägt nicht fehl, falls kein Eulerzyklus existiert. + Die Existenz muss separat geprüft werden. 
+ \end{itemize} +\end{algorithm} + +\begin{algorithm}{Baum-Isomorphie} + \begin{methods} + \method{treeLabel}{berechnet kanonischen Namen für einen Baum}{\abs{V}\*\log(\abs{V})} + \end{methods} + \sourcecode{graph/treeIsomorphism.cpp} +\end{algorithm} + +\subsection{Kürzeste Wege} + +\subsubsection{\textsc{Bellmann-Ford}-Algorithmus} +\method{bellmanFord}{kürzeste Pfade oder negative Kreise finden}{\abs{V}\*\abs{E}} +\sourcecode{graph/bellmannFord.cpp} + +\subsubsection{Algorithmus von \textsc{Dijkstra}} +\method{dijkstra}{kürzeste Pfade in Graphen ohne negative Kanten}{\abs{E}\*\log(\abs{V})} +\sourcecode{graph/dijkstra.cpp} + +\subsubsection{\textsc{Floyd-Warshall}-Algorithmus} +\method{floydWarshall}{kürzeste Pfade oder negative Kreise finden}{\abs{V}^3} +\begin{itemize} + \item \code{dist[i][i] = 0, dist[i][j] = edge\{j, j\}.weight} oder \code{INF} + \item \code{i} liegt auf einem negativen Kreis $\Leftrightarrow$ \code{dist[i][i] < 0}. +\end{itemize} +\sourcecode{graph/floydWarshall.cpp} + +\subsubsection{Matrix-Algorithmus} +Sei $d_{i\smash{j}}$ die Distanzmatrix von $G$, dann gibt $d_{i\smash{j}}^k$ die kürzeste Distanz von $i$ nach $j$ mit maximal $k$ kanten an mit der Verknüpfung: $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \min\{a_{ik} \cdot b_{k\smash{j}}\}$ + + +Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, dann gibt $a_{i\smash{j}}^k$ die Anzahl der Wege von $i$ nach $j$ mit Länge genau \textcolor{gray}{(maximal)} $k$ an mit der Verknüpfung: $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \sum a_{ik} \cdot b_{k\smash{j}}$ + +\begin{algorithm}{Dynamic Connectivity} + \begin{methods} + \method{Constructor}{erzeugt Baum ($n$ Knoten, $m$ updates)}{n+m} + \method{addEdge}{fügt Kannte ein,\code{id}=delete Zeitpunkt}{\log(n)} + \method{eraseEdge}{entfernt Kante \code{id}}{\log(n)} + \end{methods} + \sourcecode{graph/connect.cpp} +\end{algorithm} + +\begin{algorithm}{Erd\H{o}s-Gallai} + Sei $d_1 \geq \cdots \geq d_{n}$. Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ + \begin{methods} + \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} + \end{methods} + \sourcecode{graph/havelHakimi.cpp} +\end{algorithm} + +\begin{algorithm}{Strongly Connected Components (\textsc{Tarjan})} + \begin{methods} + \method{scc}{berechnet starke Zusammenhangskomponenten}{\abs{V}+\abs{E}} + \end{methods} + \sourcecode{graph/scc.cpp} +\end{algorithm} + +\begin{algorithm}{DFS} + \input{graph/dfs} +\end{algorithm} + +\begin{algorithm}{Artikulationspunkte, Brücken und BCC} + \begin{methods} + \method{find}{berechnet Artikulationspunkte, Brücken und BCC}{\abs{V}+\abs{E}} + \end{methods} + \textbf{Wichtig:} isolierte Knoten und Brücken sind keine BCC. 
+ \sourcecode{graph/articulationPoints.cpp} +\end{algorithm} +\vfill\null\columnbreak + +\begin{algorithm}{2-SAT} + \sourcecode{graph/2sat.cpp} +\end{algorithm} + +\begin{algorithm}{Maximal Cliques} + \begin{methods} + \method{bronKerbosch}{berechnet alle maximalen Cliquen}{3^\frac{n}{3}} + \method{addEdge}{fügt \textbf{ungerichtete} Kante ein}{1} + \end{methods} + \sourcecode{graph/bronKerbosch.cpp} +\end{algorithm} + +\begin{algorithm}{Cycle Counting} + \begin{methods} + \method{findBase}{berechnet Basis}{\abs{V}\cdot\abs{E}} + \method{count}{zählt Zykel}{2^{\abs{\mathit{base}}}} + \end{methods} + \begin{itemize} + \item jeder Zyklus ist das xor von einträgen in \code{base}. + \end{itemize} + \sourcecode{graph/cycleCounting.cpp} +\end{algorithm} + +\begin{algorithm}{Wert des maximalen Matchings} + Fehlerwahrscheinlichkeit: $\left(\frac{m}{MOD}\right)^I$ + \sourcecode{graph/matching.cpp} +\end{algorithm} + +\begin{algorithm}{Allgemeines maximales Matching} + \begin{methods} + \method{match}{berechnet algemeines Matching}{\abs{E}\*\abs{V}\*\log(\abs{V})} + \end{methods} + \sourcecode{graph/blossom.cpp} +\end{algorithm} + +\begin{algorithm}{Rerooting Template} + \sourcecode{graph/reroot.cpp} +\end{algorithm} + +\begin{algorithm}{Virtual Trees} + \sourcecode{graph/virtualTree.cpp} +\end{algorithm} + +\begin{algorithm}{Maximum Cardinatlity Bipartite Matching} + \label{kuhn} + \begin{methods} + \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} + \end{methods} + \begin{itemize} + \item die ersten [0..l) Knoten in \code{adj} sind die linke Seite des Graphen + \end{itemize} + \sourcecode{graph/maxCarBiMatch.cpp} + \begin{methods} + \method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}} + \end{methods} + \sourcecode{graph/hopcroftKarp.cpp} +\end{algorithm} + +\begin{algorithm}{Global Mincut} + \begin{methods} + \method{stoer\_wagner}{berechnet globalen Mincut}{\abs{V}\abs{E}+\abs{V}^2\*\log(\abs{E})} + \method{merge(a,b)}{merged Knoten $b$ in Knoten $a$}{\abs{E}} + \end{methods} + \textbf{Tipp:} Cut Rekonstruktion mit \code{unionFind} für Partitionierung oder \code{vector} für edge id's im cut. + \sourcecode{graph/stoerWagner.cpp} +\end{algorithm} + +\subsection{Max-Flow} + +\optional{ +\subsubsection{Push Relabel} +\begin{methods} + \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/pushRelabel.cpp} +} + +\begin{algorithm}{Min-Cost-Max-Flow} + \begin{methods} + \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} + \end{methods} + \sourcecode{graph/minCostMaxFlow.cpp} +\end{algorithm} + +\subsubsection{Dinic's Algorithm mit Capacity Scaling} +\begin{methods} + \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} + \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} +\end{methods} +\sourcecode{graph/dinicScaling.cpp} +\vfill\null +\columnbreak + +\optional{ +\subsubsection{Anwendungen} +\begin{itemize} + \item \textbf{Maximum Edge Disjoint Paths}\newline + Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen. + \begin{enumerate} + \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante auf 1. + \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Kanten. + \end{enumerate} + \item \textbf{Maximum Independent Paths}\newline + Finde die maximale Anzahl an Pfaden von $s$ nach $t$, die keinen Knoten teilen. 
+ \begin{enumerate} + \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante \emph{und jedes Knotens} auf 1. + \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Knoten. + \end{enumerate} + \item \textbf{Min-Cut}\newline + Der maximale Fluss ist gleich dem minimalen Schnitt. + Bei Quelle $s$ und Senke $t$, partitioniere in $S$ und $T$. + Zu $S$ gehören alle Knoten, die im Residualgraphen von $s$ aus erreichbar sind (Rückwärtskanten beachten). +\end{itemize} +} + +\begin{algorithm}{Maximum Weight Bipartite Matching} + \begin{methods} + \method{match}{berechnet Matching}{\abs{V}^3} + \end{methods} + \sourcecode{graph/maxWeightBipartiteMatching.cpp} +\end{algorithm} +\vfill\null +\columnbreak + + +\begin{algorithm}[optional]{TSP} + \begin{methods} + \method{TSP}{berechnet eine Tour}{n^2\*2^n} + \end{methods} + \sourcecode{graph/TSP.cpp} +\end{algorithm} + +\begin{algorithm}[optional]{Bitonic TSP} + \begin{methods} + \method{bitonicTSP}{berechnet eine Bitonische Tour}{n^2} + \end{methods} + \sourcecode{graph/bitonicTSPsimple.cpp} +\end{algorithm} + diff --git a/content/graph/havelHakimi.cpp b/content/graph/havelHakimi.cpp new file mode 100644 index 0000000..ac4d67d --- /dev/null +++ b/content/graph/havelHakimi.cpp @@ -0,0 +1,18 @@ +vector> havelHakimi(const vector& deg) { + priority_queue> pq; + for (int i = 0; i < sz(deg); i++) { + if (deg[i] > 0) pq.push({deg[i], i}); + } + vector> adj(sz(deg)); + while (!pq.empty()) { + auto [degV, v] = pq.top(); pq.pop(); + if (sz(pq) < degV) return {}; //impossible + vector> todo(degV); + for (auto& e : todo) e = pq.top(), pq.pop(); + for (auto [degU, u] : todo) { + adj[v].push_back(u); + adj[u].push_back(v); + if (degU > 1) pq.push({degU - 1, u}); + }} + return adj; +} diff --git a/content/graph/hld.cpp b/content/graph/hld.cpp new file mode 100644 index 0000000..65d3f5c --- /dev/null +++ b/content/graph/hld.cpp @@ -0,0 +1,44 @@ +vector> adj; +vector sz, in, out, nxt, par; +int counter; + +void dfs_sz(int v = 0, int from = -1) { + for (auto& u : adj[v]) if (u != from) { + dfs_sz(u, v); + sz[v] += sz[u]; + if (adj[v][0] == from || sz[u] > sz[adj[v][0]]) { + swap(u, adj[v][0]); //changes adj! +}}} + +void dfs_hld(int v = 0, int from = -1) { + par[v] = from; + in[v] = counter++; + for (int u : adj[v]) if (u != from) { + nxt[u] = (u == adj[v][0]) ? 
nxt[v] : u; + dfs_hld(u, v); + } + out[v] = counter; +} + +void init(int root = 0) { + int n = sz(adj); + sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1); + in.resize(n), out.resize(n); + counter = 0; + dfs_sz(root); + dfs_hld(root); +} + +template +void for_intervals(int u, int v, F&& f) { + for (;; v = par[nxt[v]]) { + if (in[v] < in[u]) swap(u, v); + f(max(in[u], in[nxt[v]]), in[v] + 1); + if (in[nxt[v]] <= in[u]) return; +}} + +int get_lca(int u, int v) { + for (;; v = par[nxt[v]]) { + if (in[v] < in[u]) swap(u, v); + if (in[nxt[v]] <= in[u]) return u; +}} diff --git a/content/graph/hopcroftKarp.cpp b/content/graph/hopcroftKarp.cpp new file mode 100644 index 0000000..c1f5d1c --- /dev/null +++ b/content/graph/hopcroftKarp.cpp @@ -0,0 +1,47 @@ +vector> adj; +// pairs ist der gematchte Knoten oder -1 +vector pairs, dist, ptr; + +bool bfs(int l) { + queue q; + for(int v = 0; v < l; v++) { + if (pairs[v] < 0) {dist[v] = 0; q.push(v);} + else dist[v] = -1; + } + bool exist = false; + while(!q.empty()) { + int v = q.front(); q.pop(); + for (int u : adj[v]) { + if (pairs[u] < 0) {exist = true; continue;} + if (dist[pairs[u]] < 0) { + dist[pairs[u]] = dist[v] + 1; + q.push(pairs[u]); + }}} + return exist; +} + +bool dfs(int v) { + for (; ptr[v] < sz(adj[v]); ptr[v]++) { + int u = adj[v][ptr[v]]; + if (pairs[u] < 0 || + (dist[pairs[u]] > dist[v] && dfs(pairs[u]))) { + pairs[u] = v; pairs[v] = u; + return true; + }} + return false; +} + +int hopcroft_karp(int l) { // l = #Knoten links + int ans = 0; + pairs.assign(sz(adj), -1); + dist.resize(l); + // Greedy Matching, optionale Beschleunigung. + for (int v = 0; v < l; v++) for (int u : adj[v]) + if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;} + while(bfs(l)) { + ptr.assign(l, 0); + for(int v = 0; v < l; v++) { + if (pairs[v] < 0) ans += dfs(v); + }} + return ans; +} diff --git a/content/graph/kruskal.cpp b/content/graph/kruskal.cpp new file mode 100644 index 0000000..987d30b --- /dev/null +++ b/content/graph/kruskal.cpp @@ -0,0 +1,9 @@ +sort(all(edges)); +vector mst; +ll cost = 0; +for (Edge& e : edges) { + if (findSet(e.from) != findSet(e.to)) { + unionSets(e.from, e.to); + mst.push_back(e); + cost += e.cost; +}} diff --git a/content/graph/matching.cpp b/content/graph/matching.cpp new file mode 100644 index 0000000..dcaea8c --- /dev/null +++ b/content/graph/matching.cpp @@ -0,0 +1,23 @@ +constexpr int MOD=1'000'000'007, I=10; +vector> adj, mat; + +int max_matching() { + int ans = 0; + mat.assign(sz(adj), {}); + for (int _ = 0; _ < I; _++) { + for (int v = 0; v < sz(adj); v++) { + mat[v].assign(sz(adj), 0); + for (int u : adj[v]) { + if (u < v) { + mat[v][u] = rand() % (MOD - 1) + 1; + mat[u][v] = MOD - mat[v][u]; + }}} + gauss(sz(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@ + int rank = 0; + for (auto& row : mat) { + if (*max_element(all(row)) != 0) rank++; + } + ans = max(ans, rank / 2); + } + return ans; +} diff --git a/content/graph/maxCarBiMatch.cpp b/content/graph/maxCarBiMatch.cpp new file mode 100644 index 0000000..e928387 --- /dev/null +++ b/content/graph/maxCarBiMatch.cpp @@ -0,0 +1,25 @@ +vector> adj; +vector pairs; // Der gematchte Knoten oder -1. +vector visited; + +bool dfs(int v) { + if (visited[v]) return false; + visited[v] = true; + for (int u : adj[v]) if (pairs[u] < 0 || dfs(pairs[u])) { + pairs[u] = v; pairs[v] = u; return true; + } + return false; +} + +int kuhn(int l) { // l = #Knoten links. + pairs.assign(sz(adj), -1); + int ans = 0; + // Greedy Matching. Optionale Beschleunigung. 
+ for (int v = 0; v < l; v++) for (int u : adj[v]) + if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;} + for (int v = 0; v < l; v++) if (pairs[v] < 0) { + visited.assign(l, false); + ans += dfs(v); + } + return ans; // Größe des Matchings. +} diff --git a/content/graph/maxWeightBipartiteMatching.cpp b/content/graph/maxWeightBipartiteMatching.cpp new file mode 100644 index 0000000..a2b0a80 --- /dev/null +++ b/content/graph/maxWeightBipartiteMatching.cpp @@ -0,0 +1,50 @@ +double costs[N_LEFT][N_RIGHT]; + +// Es muss l<=r sein! (sonst Endlosschleife) +double match(int l, int r) { + vector lx(l), ly(r); + //xy is matching from l->r, yx from r->l, or -1 + vector xy(l, -1), yx(r, -1); + vector> slack(r); + + for (int x = 0; x < l; x++) + lx[x] = *max_element(costs[x], costs[x] + r); + for (int root = 0; root < l; root++) { + vector aug(r, -1); + vector s(l); + s[root] = true; + for (int y = 0; y < r; y++) { + slack[y] = {lx[root] + ly[y] - costs[root][y], root}; + } + int y = -1; + while (true) { + double delta = INF; + int x = -1; + for (int yy = 0; yy < r; yy++) { + if (aug[yy] < 0 && slack[yy].first < delta) { + tie(delta, x) = slack[yy]; + y = yy; + }} + if (delta > 0) { + for (int x = 0; x < l; x++) if (s[x]) lx[x] -= delta; + for (int y = 0; y < r; y++) { + if (aug[y] >= 0) ly[y] += delta; + else slack[y].first -= delta; + }} + aug[y] = x; + x = yx[y]; + if (x < 0) break; + s[x] = true; + for (int y = 0; y < r; y++) { + if (aug[y] < 0) { + double alt = lx[x] + ly[y] - costs[x][y]; + if (slack[y].first > alt) { + slack[y] = {alt, x}; + }}}} + while (y >= 0) { + yx[y] = aug[y]; + swap(y, xy[aug[y]]); + }} + return accumulate(all(lx), 0.0) + + accumulate(all(ly), 0.0); // Wert des Matchings +} diff --git a/content/graph/minCostMaxFlow.cpp b/content/graph/minCostMaxFlow.cpp new file mode 100644 index 0000000..14a222c --- /dev/null +++ b/content/graph/minCostMaxFlow.cpp @@ -0,0 +1,66 @@ +constexpr ll INF = 1LL << 60; // Größer als der maximale Fluss. 
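+// Benutzungsskizze (Annahmen: n Knoten, Quelle s, Senke t, Kante u->v):
+//   MinCostFlow mcf(n, s, t);
+//   mcf.addEdge(u, v, cap, cost); // gerichtete Kante mit Kapazität und Kosten
+//   mcf.mincostflow();            // danach: mcf.maxflow, mcf.mincost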
+struct MinCostFlow { + struct edge { + int to; + ll f, cost; + }; + vector edges; + vector> adj; + vector pref, con; + vector dist; + const int s, t; + ll maxflow, mincost; + + MinCostFlow(int n, int source, int target) : + adj(n), s(source), t(target) {}; + + void addEdge(int u, int v, ll c, ll cost) { + adj[u].push_back(sz(edges)); + edges.push_back({v, c, cost}); + adj[v].push_back(sz(edges)); + edges.push_back({u, 0, -cost}); + } + + bool SPFA() { + pref.assign(sz(adj), -1); + dist.assign(sz(adj), INF); + vector inqueue(sz(adj)); + queue queue; + dist[s] = 0; + queue.push(s); + pref[s] = s; + inqueue[s] = true; + while (!queue.empty()) { + int cur = queue.front(); queue.pop(); + inqueue[cur] = false; + for (int id : adj[cur]) { + int to = edges[id].to; + if (edges[id].f > 0 && + dist[to] > dist[cur] + edges[id].cost) { + dist[to] = dist[cur] + edges[id].cost; + pref[to] = cur; + con[to] = id; + if (!inqueue[to]) { + inqueue[to] = true; + queue.push(to); + }}}} + return pref[t] != -1; + } + + void extend() { + ll w = INF; + for (int u = t; pref[u] != u; u = pref[u]) + w = min(w, edges[con[u]].f); + maxflow += w; + mincost += dist[t] * w; + for (int u = t; pref[u] != u; u = pref[u]) { + edges[con[u]].f -= w; + edges[con[u] ^ 1].f += w; + }} + + void mincostflow() { + con.assign(sz(adj), 0); + maxflow = mincost = 0; + while (SPFA()) extend(); + } +}; diff --git a/content/graph/pushRelabel.cpp b/content/graph/pushRelabel.cpp new file mode 100644 index 0000000..73a9eae --- /dev/null +++ b/content/graph/pushRelabel.cpp @@ -0,0 +1,64 @@ +struct Edge { + int to, rev; + ll f, c; +}; + +vector> adj; +vector> hs; +vector ec; +vector cur, H; + +void addEdge(int u, int v, ll c) { + adj[u].push_back({v, (int)sz(adj[v]), 0, c}); + adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0}); +} + +void addFlow(Edge& e, ll f) { + if (ec[e.to] == 0 && f > 0) + hs[H[e.to]].push_back(e.to); + e.f += f; + adj[e.to][e.rev].f -= f; + ec[e.to] += f; + ec[adj[e.to][e.rev].to] -= f; +} + +ll maxFlow(int s, int t) { + int n = sz(adj); + hs.assign(2*n, {}); + ec.assign(n, 0); + cur.assign(n, 0); + H.assign(n, 0); + H[s] = n; + ec[t] = 1;//never set t to active... 
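+  // co[h]: Anzahl Knoten mit Höhe h (für die Gap-Heuristik)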
+ vector co(2*n); + co[0] = n - 1; + for (Edge& e : adj[s]) addFlow(e, e.c); + for (int hi = 0;;) { + while (hs[hi].empty()) if (!hi--) return -ec[s]; + int v = hs[hi].back(); + hs[hi].pop_back(); + while (ec[v] > 0) { + if (cur[v] == sz(adj[v])) { + H[v] = 2*n; + for (int i = 0; i < sz(adj[v]); i++) { + Edge& e = adj[v][i]; + if (e.c - e.f > 0 && + H[v] > H[e.to] + 1) { + H[v] = H[e.to] + 1; + cur[v] = i; + }} + co[H[v]]++; + if (!--co[hi] && hi < n) { + for (int i = 0; i < n; i++) { + if (hi < H[i] && H[i] < n) { + co[H[i]]--; + H[i] = n + 1; + }}} + hi = H[v]; + } else { + Edge& e = adj[v][cur[v]]; + if (e.c - e.f > 0 && H[v] == H[e.to] + 1) { + addFlow(adj[v][cur[v]], min(ec[v], e.c - e.f)); + } else { + cur[v]++; +}}}}} diff --git a/content/graph/reroot.cpp b/content/graph/reroot.cpp new file mode 100644 index 0000000..4c6a748 --- /dev/null +++ b/content/graph/reroot.cpp @@ -0,0 +1,62 @@ +// Usual Tree DP can be broken down in 4 steps: +// - Initialize dp[v] = identity +// - Iterate over all children w and take a value for w +// by looking at dp[w] and possibly the edge label of v -> w +// - combine the values of those children +// usually this operation should be commutative and associative +// - finalize the dp[v] after iterating over all children +struct Reroot { + using T = ll; + + // identity element + T E() {} + // x: dp value of child + // e: index of edge going to child + T takeChild(T x, int e) {} + T comb(T x, T y) {} + // called after combining all dp values of children + T fin(T x, int v) {} + + vector>> g; + vector ord, pae; + vector dp; + + T dfs(int v) { + ord.push_back(v); + for (auto [w, e] : g[v]) { + g[w].erase(find(all(g[w]), pair(v, e^1))); + pae[w] = e^1; + dp[v] = comb(dp[v], takeChild(dfs(w), e)); + } + return dp[v] = fin(dp[v], v); + } + + vector solve(int n, vector> edges) { + g.resize(n); + for (int i = 0; i < n-1; i++) { + g[edges[i].first].emplace_back(edges[i].second, 2*i); + g[edges[i].second].emplace_back(edges[i].first, 2*i+1); + } + pae.assign(n, -1); + dp.assign(n, E()); + dfs(0); + vector updp(n, E()), res(n, E()); + for (int v : ord) { + vector pref(sz(g[v])+1), suff(sz(g[v])+1); + if (v != 0) pref[0] = takeChild(updp[v], pae[v]); + for (int i = 0; i < sz(g[v]); i++){ + auto [u, w] = g[v][i]; + pref[i+1] = suff[i] = takeChild(dp[u], w); + pref[i+1] = comb(pref[i], pref[i+1]); + } + for (int i = sz(g[v])-1; i >= 0; i--) { + suff[i] = comb(suff[i], suff[i+1]); + } + for (int i = 0; i < sz(g[v]); i++) { + updp[g[v][i].first] = fin(comb(pref[i], suff[i+1]), v); + } + res[v] = fin(pref.back(), v); + } + return res; + } +}; diff --git a/content/graph/scc.cpp b/content/graph/scc.cpp new file mode 100644 index 0000000..ac9a40b --- /dev/null +++ b/content/graph/scc.cpp @@ -0,0 +1,32 @@ +vector> adj, sccs; +int counter; +vector inStack; +vector low, idx, s; //idx enthält Index der SCC pro Knoten. 
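+// Benutzung: scc() aufrufen; danach enthält sccs die Komponenten in
+// umgekehrter topologischer Reihenfolge des Kondensationsgraphen.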
+ +void visit(int v) { + int old = low[v] = counter++; + s.push_back(v); inStack[v] = true; + + for (auto u : adj[v]) { + if (low[u] < 0) visit(u); + if (inStack[u]) low[v] = min(low[v], low[u]); + } + + if (old == low[v]) { + sccs.push_back({}); + for (int u = -1; u != v;) { + u = s.back(); s.pop_back(); inStack[u] = false; + idx[u] = sz(sccs) - 1; + sccs.back().push_back(u); +}}} + +void scc() { + inStack.assign(sz(adj), false); + low.assign(sz(adj), -1); + idx.assign(sz(adj), -1); + sccs.clear(); + + counter = 0; + for (int i = 0; i < sz(adj); i++) { + if (low[i] < 0) visit(i); +}} diff --git a/content/graph/stoerWagner.cpp b/content/graph/stoerWagner.cpp new file mode 100644 index 0000000..97e667a --- /dev/null +++ b/content/graph/stoerWagner.cpp @@ -0,0 +1,53 @@ +struct Edge { + int from, to; + ll cap; +}; + +vector> adj, tmp; +vector erased; + +void merge(int u, int v) { + tmp[u].insert(tmp[u].end(), all(tmp[v])); + tmp[v].clear(); + erased[v] = true; + for (auto& vec : tmp) { + for (Edge& e : vec) { + if (e.from == v) e.from = u; + if (e.to == v) e.to = u; +}}} + +ll stoer_wagner() { + ll res = INF; + tmp = adj; + erased.assign(sz(tmp), false); + for (int i = 1; i < sz(tmp); i++) { + int s = 0; + while (erased[s]) s++; + priority_queue> pq; + pq.push({0, s}); + vector con(sz(tmp)); + ll cur = 0; + vector> state; + while (!pq.empty()) { + int c = pq.top().second; + pq.pop(); + if (con[c] < 0) continue; //already seen + con[c] = -1; + for (auto e : tmp[c]) { + if (con[e.to] >= 0) {//add edge to cut + con[e.to] += e.cap; + pq.push({con[e.to], e.to}); + cur += e.cap; + } else if (e.to != c) {//remove edge from cut + cur -= e.cap; + }} + state.push_back({cur, c}); + } + int t = state.back().second; + state.pop_back(); + if (state.empty()) return 0; //graph is not connected?! + merge(state.back().second, t); + res = min(res, state.back().first); + } + return res; +} diff --git a/content/graph/treeIsomorphism.cpp b/content/graph/treeIsomorphism.cpp new file mode 100644 index 0000000..355fefb --- /dev/null +++ b/content/graph/treeIsomorphism.cpp @@ -0,0 +1,15 @@ +vector> adj; +map, int> known; // dont reset! + +int treeLabel(int v, int from = -1) { + vector children; + for (int u : adj[v]) { + if (u == from) continue; + children.push_back(treeLabel(u, v)); + } + sort(all(children)); + if (known.find(children) == known.end()) { + known[children] = sz(known); + } + return known[children]; +} diff --git a/content/graph/virtualTree.cpp b/content/graph/virtualTree.cpp new file mode 100644 index 0000000..27d2d6c --- /dev/null +++ b/content/graph/virtualTree.cpp @@ -0,0 +1,22 @@ +// needs dfs in- and out- time and lca function +vector in, out; + +void virtualTree(vector ind) { // indices of used nodes + sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); + for (int i = 0, n = sz(ind); i < n - 1; i++) { + ind.push_back(lca(ind[i], ind[i + 1])); + } + sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); + ind.erase(unique(all(ind)), ind.end()); + + int n = ind.size(); + vector> tree(n); + vector st = {0}; + for (int i = 1; i < n; i++) { + while (in[ind[i]] >= out[ind[st.back()]]) st.pop_back(); + tree[st.back()].push_back(i); + st.push_back(i); + } + // virtual directed tree with n nodes, original indices in ind + // weights can be calculated, e.g. 
with binary lifting +} diff --git a/content/latexHeaders/code.sty b/content/latexHeaders/code.sty new file mode 100644 index 0000000..3ebdda3 --- /dev/null +++ b/content/latexHeaders/code.sty @@ -0,0 +1,141 @@ +% Colors, used for syntax highlighting. +% To print this document, set all colors to black! +\usepackage{xcolor} +\definecolor{safeRed}{HTML}{D7191C} +\definecolor{safeOrange}{HTML}{FFDE71} +\definecolor{safeYellow}{HTML}{FFFFBF} +\definecolor{safeGreen}{HTML}{99CF8F} +\definecolor{safeBlue}{HTML}{2B83BA} + +%try printer friendly colors? +%\colorlet{keyword}{safeBlue} +%\colorlet{string}{safeRed} +%\colorlet{comment}{safeGreen} +%\colorlet{identifier}{black} +\definecolor{type}{HTML}{2750A0} +\definecolor{string}{HTML}{7B3294} +\definecolor{comment}{HTML}{1A9641} +\definecolor{identifier}{HTML}{000000} +\definecolor{keyword}{HTML}{900000} + +% Source code listings. +\usepackage[scaled=0.80]{beramono} + +\usepackage{listings} +\lstset{ + language={[11]C++}, + numbers=left, + stepnumber=1, + numbersep=6pt, + numberstyle=\small, + breaklines=true, + breakautoindent=true, + breakatwhitespace=false, + numberblanklines=true, + postbreak=\space, + tabsize=2, + upquote=true, + basicstyle=\ttfamily\normalsize, + showspaces=false, + showstringspaces=false, + extendedchars=true, + keywordstyle=\color{keyword}\bfseries, + stringstyle=\color{string}\bfseries, + commentstyle=\color{comment}\bfseries\itshape, + identifierstyle=\color{identifier}, + directivestyle=\color{keyword}\bfseries, + emph={auto, int, long, long long, float, double, long double, char, bool, void, ll, ld, pt, lll, __int128, __float128, true, false, this, nullptr, INF, inf, EPS, eps}, + emphstyle=\color{type}\bfseries, + frame=trbl, + aboveskip=3pt, + belowskip=3pt, + deletestring=[b]{'},%fix digit separator but break char highlighting (fixed again with literate) + escapechar=@ + %moredelim=**[is][{\btHL[fill=green!30,draw=red,dashed,thin]}]{@}{@} +} + +\newcommand{\formatChar}[1]{{\color{string}\bfseries\textquotesingle{}#1\textquotesingle{}}} + +% Listings doesn't support UTF8. This is just enough for German umlauts. and commonly used chars +\lstset{literate=% + {'a'}{{\formatChar{a}}}3 + {'z'}{{\formatChar{z}}}3 + {'A'}{{\formatChar{A}}}3 + {'Z'}{{\formatChar{Z}}}3 + {'0'}{{\formatChar{0}}}3 + {'1'}{{\formatChar{1}}}3 + {'\$'}{{\formatChar{\$}}}3 + {'\#'}{{\formatChar{\#}}}3 + {Ö}{{\"O}}1 + {Ä}{{\"A}}1 + {Ü}{{\"U}}1 + {ß}{{\ss}}1 + {ü}{{\"u}}1 + {ä}{{\"a}}1 + {ö}{{\"o}}1 + {~}{{\textasciitilde}}1 +} + +\makeatletter +\let\orig@lstnumber=\thelstnumber +\newcommand\lstresetnumber{\global\let\thelstnumber=\orig@lstnumber} +\let\orig@placelstnumber=\lst@PlaceNumber +\gdef\lst@PlaceNumber{\orig@placelstnumber\lstresetnumber} +\newcommand\lstsettmpnumber[1]{\gdef\thelstnumber{#1}} + +\lst@AddToHook{OnEmptyLine}{% + \ifnum\value{lstnumber}>99 + \lstsettmpnumber{\_\_\_} + \else\ifnum\value{lstnumber}>9 + \lstsettmpnumber{\_\_} + \else + \lstsettmpnumber{\_} + \fi\fi +% \lstsettmpnumber{\_\_\kern-6pt}% + \vspace{-1.75ex}% + \addtocounter{lstnumber}{-1}% +} +% old: (change numberblanklines=false!) 
+%\lst@AddToHook{OnEmptyLine}{% +% \vspace{\dimexpr\baselineskip+0.5em}% +% \addtocounter{lstnumber}{-1}% +%} + +\newenvironment{btHighlight}[1][] +{\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}} +{\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup} + +\newcommand\btHL[1][]{% + \begin{btHighlight}[#1]\bgroup\aftergroup\bt@HL@endenv% + } + \def\bt@HL@endenv{% + \end{btHighlight}% + \egroup% +} +\newcommand{\bt@HL@box}[2][]{% + \tikz[#1]{% + \pgfpathrectangle{\pgfpoint{1pt}{0pt}}{\pgfpoint{\wd #2}{\ht #2}}% + \pgfusepath{use as bounding box}% + \node[anchor=base west, fill=orange!30,outer sep=0pt,inner xsep=2.2pt, inner ysep=0pt, rounded corners=3pt, minimum height=\ht\strutbox+1pt,#1]{\raisebox{1pt}{\strut}\strut\usebox{#2}}; + }% +} + +\newcommand{\hl}[1]{\btHL[fill=safeOrange,draw=black,thin]{#1}} + +\ifthenelse{\isundefined{\gitorigin}}{}{ + \usepackage{ocgx2} + \usepackage{fontawesome} + \lst@AddToHook{Init}{% + \ifthenelse{\equal{\lst@name}{}}{}{% + \begin{minipage}[t][0pt]{\linewidth}% + \vspace{0pt}% + \hfill% + \begin{ocg}[printocg=never]{Source links}{srclinks}{1}% + \hfill\href{\gitorigin\lst@name}{\faExternalLink}% + \end{ocg}% + \end{minipage}% + }% + } +} +\makeatother + diff --git a/content/latexHeaders/commands.sty b/content/latexHeaders/commands.sty new file mode 100644 index 0000000..edbba1b --- /dev/null +++ b/content/latexHeaders/commands.sty @@ -0,0 +1,56 @@ +% custom commands +\newcommand{\optional}[1]{ + \ifoptional + #1 + \fi} +\newcommand{\runtime}[1]{\ensuremath{\mathcal{O}\left(#1\right)}} +\newcommand{\code}[1]{\lstinline[breaklines=true]{#1}} +\let\codeSafe\lstinline + +\usepackage{tikz} +\usetikzlibrary{angles,quotes} + + +%new environment to define algorithms +\usepackage{ifthen} +\NewDocumentEnvironment{algorithm}{ O{required} m +b }{}{ + \ifthenelse{\equal{#1}{optional}}{% + \optional{ + \needspace{4\baselineskip}% + \subsection{#2\textcolor{gray}{(optional)}}% + #3% + } + }{% + \needspace{4\baselineskip}% + \subsection{#2}% + #3% + } +} + +%\ifthenelse{\equal{#3}{}}{}{\runtime{#3}} + +\newcommand{\sourcecode}[1]{% + \label{code:#1}% + \nobreak% +% \needspace{3\baselineskip}% +% \nopagebreak% + \lstinputlisting{#1}% + \penalty -1000% +} +\newcommand{\sourceref}[1]{{% + \color{comment}\bfseries\itshape{}Seite \pageref{code:#1}% +}} + +\newcommand{\method}[4][]{\texttt{#2}~~#3~~\runtime{#4}#1\par} + +\newenvironment{methods}[1][lll]{% + %\begin{minipage}{\linewidth}% + \renewcommand{\method}[4][]{\texttt{##2}&##3&\ifthenelse{\equal{##4}{}}{}{\runtime{##4}}##1\\}% + \begin{tabular}{@{}#1@{}}% +}{% + \end{tabular}% + %\end{minipage}% + \nobreak% + \needspace{3\baselineskip}% + \nobreak% +} diff --git a/content/latexHeaders/layout.sty b/content/latexHeaders/layout.sty new file mode 100644 index 0000000..096cf23 --- /dev/null +++ b/content/latexHeaders/layout.sty @@ -0,0 +1,82 @@ +% Don't waste space at the page borders. Use two column layout. +\usepackage[ + top=2cm, + bottom=1cm, + left=1cm, + right=1cm, + landscape +]{geometry} + +% Headline and bottomline. +\usepackage{scrlayer-scrpage} +\pagestyle{scrheadings} +\clearscrheadfoot +\ihead{\university} +\chead{\teamname} +\ohead{\pagemark} + +% Shift the title up to waste less space. +\usepackage{titling} +\setlength{\droptitle}{-8em} + +% Multicol layout for the table of contents. +\usepackage{multicol} +\usepackage{multirow} +\usepackage{array} + +% Automatically have table fill horizontal space. 
+\usepackage{makecell} +\usepackage{tabularx} +\newcolumntype{C}{>{\centering\arraybackslash}X} +\newcolumntype{L}{>{\raggedright\arraybackslash}X} +\newcolumntype{R}{>{\raggedleft\arraybackslash}X} +\newcolumntype{I}{!{\color{lightgray}\vrule}} +\usepackage{colortbl} +\newcommand{\grayhline}{\arrayrulecolor{lightgray}\hline + \arrayrulecolor{black}} + +% Nice table line. +\usepackage{booktabs} + +% Dingbats symbols. +\usepackage{pifont} + +% use less space... +%\usepackage[subtle, sections, indent, leading, charwidths]{savetrees} +\usepackage[moderate,sections]{savetrees} +\RedeclareSectionCommands[ + beforeskip=1pt plus 5pt, + afterskip=0.1pt plus 1.5pt +]{section,subsection,subsubsection} +\RedeclareSectionCommands[ + beforeskip=1pt plus 5pt, + afterskip=-1.2ex +]{paragraph} + +% dont indent paragagraphs +\setlength{\parindent}{0em} +\parskip=0pt + +% dont encourage breaks before lists +\@beginparpenalty=10000 + +% Nice enumerations without wasting space above and below. +\usepackage{relsize} +\usepackage{enumitem} +\setlist{nosep,leftmargin=2ex,labelwidth=1ex,labelsep=1ex} +\setlist[2]{leftmargin=3ex,label=\smaller[2]\ding{228}} +\setlist[3]{leftmargin=3ex,label=\larger\textbf{--}} +\setlist[description]{leftmargin=0pt} + +% decrease space for tables +\tabcolsep=2pt +\setlength\extrarowheight{0.3pt plus 1pt} + +\newenvironment{expandtable}{% + \begin{addmargin}{-3.4pt} +}{% + \end{addmargin} +} + +\usepackage{needspace} +\usepackage{setspace} diff --git a/content/latexHeaders/math.sty b/content/latexHeaders/math.sty new file mode 100644 index 0000000..c34cc99 --- /dev/null +++ b/content/latexHeaders/math.sty @@ -0,0 +1,98 @@ +% For Headlines with math +\usepackage{bm} + +% Display math. +\usepackage{amsmath} +\usepackage{mathtools} +\usepackage{amssymb} +\usepackage{ntheorem} + +%\usepackage{pxfonts} +\usepackage[scaled=0.945,largesc,looser]{newpxtext}%better than pxfonts... +\usepackage[scaled=0.945,bigdelims]{newpxmath} +\let\mathbb\vmathbb + +\DeclareFontFamily{LMX}{npxexx}{} +\DeclareFontShape{LMX}{npxexx}{m}{n}{<-> s * [1.045] zplexx}{} +\DeclareFontShape{LMX}{npxexx}{b}{n}{<-> s * [1.045] zplbexx}{} +%\DeclareFontShape{LMX}{npxexx}{m}{n}{<-> s * [0.78] zplexx}{} +%\DeclareFontShape{LMX}{npxexx}{b}{n}{<-> s * [0.78] zplbexx}{} +\DeclareFontShape{LMX}{npxexx}{bx}{n}{<->ssub * npxexx/b/n}{} + +%\usepackage[scaled=0.91]{XCharter} +%\usepackage[scaled=0.89,type1]{cabin}% sans serif +%\usepackage[charter,varbb,scaled=1.00,noxchvw]{newtxmath} + +%\usepackage{libertine} +%\usepackage[libertine]{newtxmath} + +% New enviroment for remarks. +\theoremstyle{break} +\newtheorem{bem}{Bemerkung} + +% New commands for math operators. +% Binomial coefficients. +\renewcommand{\binom}[2]{ + \Bigl( + \begin{matrix} + #1 \\ + #2 + \end{matrix} + \Bigr) +} +% Euler numbers, first kind. +\newcommand{\eulerI}[2]{ + \Bigl\langle + \begin{matrix} + #1 \\ + #2 + \end{matrix} + \Bigr\rangle +} +% Euler numbers, second kind. +\newcommand{\eulerII}[2]{ + \Bigl\langle\mkern-4mu\Bigl\langle + \begin{matrix} + #1 \\ + #2 + \end{matrix} + \Bigr\rangle\mkern-4mu\Bigr\rangle +} +% Stirling numbers, first kind. +\newcommand{\stirlingI}[2]{ + \Bigl[ + \begin{matrix} + #1 \\ + #2 + \end{matrix} + \Bigr] +} +% Stirling numbers, second kind. +\newcommand{\stirlingII}[2]{ + \Bigl\{ + \begin{matrix} + #1 \\ + #2 + \end{matrix} + \Bigr\} +} +% Legendre symbol. +\newcommand{\legendre}[2]{ + \Bigl( + \dfrac{#1}{#2} + \Bigr) +} +% Expectation values. +\newcommand{\E}{\text{E}} +% Greates common divisor. 
+\newcommand{\ggT}{\text{ggT}} +% sign for negative values +\newcommand{\sign}{\scalebox{0.66}[1.0]{\( - \)}} +% absolute values +\newcommand{\abs}[1]{\left|#1\right|} +% ceiling function +\newcommand{\ceil}[1]{\left\lceil#1\right\rceil} +% floor function +\newcommand{\floor}[1]{\left\lfloor#1\right\rfloor} +% multiplication +\renewcommand{\*}{\ensuremath{\cdotp}} diff --git a/content/math/berlekampMassey.cpp b/content/math/berlekampMassey.cpp new file mode 100644 index 0000000..29e084f --- /dev/null +++ b/content/math/berlekampMassey.cpp @@ -0,0 +1,31 @@ +constexpr ll mod = 1'000'000'007; +vector BerlekampMassey(const vector& s) { + int n = sz(s), L = 0, m = 0; + vector C(n), B(n), T; + C[0] = B[0] = 1; + + ll b = 1; + for (int i = 0; i < n; i++) { + m++; + ll d = s[i] % mod; + for (int j = 1; j <= L; j++) { + d = (d + C[j] * s[i - j]) % mod; + } + if (!d) continue; + T = C; + ll coef = d * powMod(b, mod-2, mod) % mod; + for (int j = m; j < n; j++) { + C[j] = (C[j] - coef * B[j - m]) % mod; + } + if (2 * L > i) continue; + L = i + 1 - L; + swap(B, T); + b = d; + m = 0; + } + + C.resize(L + 1); + C.erase(C.begin()); + for (auto& x : C) x = (mod - x) % mod; + return C; +} diff --git a/content/math/bigint.cpp b/content/math/bigint.cpp new file mode 100644 index 0000000..1b3b953 --- /dev/null +++ b/content/math/bigint.cpp @@ -0,0 +1,271 @@ +// base and base_digits must be consistent +constexpr ll base = 1'000'000; +constexpr ll base_digits = 6; +struct bigint { + using vll = vector; + vll a; ll sign; + + bigint() : sign(1) {} + + bigint(ll v) {*this = v;} + + bigint(const string &s) {read(s);} + + void operator=(ll v) { + sign = 1; + if (v < 0) sign = -1, v = -v; + a.clear(); + for (; v > 0; v = v / base) + a.push_back(v % base); + } + + bigint operator+(const bigint& v) const { + if (sign == v.sign) { + bigint res = v; + for (ll i = 0, carry = 0; i < max(sz(a), sz(v.a)) || carry; ++i) { + if (i == sz(res.a)) + res.a.push_back(0); + res.a[i] += carry + (i < sz(a) ? a[i] : 0); + carry = res.a[i] >= base; + if (carry) + res.a[i] -= base; + } + return res; + } + return *this - (-v); + } + + bigint operator-(const bigint& v) const { + if (sign == v.sign) { + if (abs() >= v.abs()) { + bigint res = *this; + for (ll i = 0, carry = 0; i < sz(v.a) || carry; ++i) { + res.a[i] -= carry + (i < sz(v.a) ? v.a[i] : 0); + carry = res.a[i] < 0; + if (carry) res.a[i] += base; + } + res.trim(); + return res; + } + return -(v - *this); + } + return *this + (-v); + } + + void operator*=(ll v) { + if (v < 0) sign = -sign, v = -v; + for (ll i = 0, carry = 0; i < sz(a) || carry; ++i) { + if (i == sz(a)) a.push_back(0); + ll cur = a[i] * v + carry; + carry = cur / base; + a[i] = cur % base; + } + trim(); + } + + bigint operator*(ll v) const { + bigint res = *this; + res *= v; + return res; + } + + friend pair divmod(const bigint& a1, const bigint& b1) { + ll norm = base / (b1.a.back() + 1); + bigint a = a1.abs() * norm; + bigint b = b1.abs() * norm; + bigint q, r; + q.a.resize(sz(a.a)); + for (ll i = sz(a.a) - 1; i >= 0; i--) { + r *= base; + r += a.a[i]; + ll s1 = sz(r.a) <= sz(b.a) ? 0 : r.a[sz(b.a)]; + ll s2 = sz(r.a) <= sz(b.a) - 1 ? 
0 : r.a[sz(b.a) - 1]; + ll d = (base * s1 + s2) / b.a.back(); + r -= b * d; + while (r < 0) r += b, --d; + q.a[i] = d; + } + q.sign = a1.sign * b1.sign; + r.sign = a1.sign; + q.trim(); + r.trim(); + return make_pair(q, r / norm); + } + + bigint operator/(const bigint& v) const { + return divmod(*this, v).first; + } + + bigint operator%(const bigint& v) const { + return divmod(*this, v).second; + } + + void operator/=(ll v) { + if (v < 0) sign = -sign, v = -v; + for (ll i = sz(a) - 1, rem = 0; i >= 0; --i) { + ll cur = a[i] + rem * base; + a[i] = cur / v; + rem = cur % v; + } + trim(); + } + + bigint operator/(ll v) const { + bigint res = *this; + res /= v; + return res; + } + + ll operator%(ll v) const { + if (v < 0) v = -v; + ll m = 0; + for (ll i = sz(a) - 1; i >= 0; --i) + m = (a[i] + m * base) % v; + return m * sign; + } + + void operator+=(const bigint& v) { + *this = *this + v; + } + void operator-=(const bigint& v) { + *this = *this - v; + } + void operator*=(const bigint& v) { + *this = *this * v; + } + void operator/=(const bigint& v) { + *this = *this / v; + } + + bool operator<(const bigint& v) const { + if (sign != v.sign) return sign < v.sign; + if (sz(a) != sz(v.a)) + return sz(a) * sign < sz(v.a) * v.sign; + for (ll i = sz(a) - 1; i >= 0; i--) + if (a[i] != v.a[i]) + return a[i] * sign < v.a[i] * sign; + return false; + } + + bool operator>(const bigint& v) const { + return v < *this; + } + bool operator<=(const bigint& v) const { + return !(v < *this); + } + bool operator>=(const bigint& v) const { + return !(*this < v); + } + bool operator==(const bigint& v) const { + return !(*this < v) && !(v < *this); + } + bool operator!=(const bigint& v) const { + return *this < v || v < *this; + } + + void trim() { + while (!a.empty() && !a.back()) a.pop_back(); + if (a.empty()) sign = 1; + } + + bool isZero() const { + return a.empty() || (sz(a) == 1 && a[0] == 0); + } + + bigint operator-() const { + bigint res = *this; + res.sign = -sign; + return res; + } + + bigint abs() const { + bigint res = *this; + res.sign *= res.sign; + return res; + } + + ll longValue() const { + ll res = 0; + for (ll i = sz(a) - 1; i >= 0; i--) + res = res * base + a[i]; + return res * sign; + } + + void read(const string& s) { + sign = 1; + a.clear(); + ll pos = 0; + while (pos < sz(s) && (s[pos] == '-' || s[pos] == '+')) { + if (s[pos] == '-') sign = -sign; + ++pos; + } + for (ll i = sz(s) - 1; i >= pos; i -= base_digits) { + ll x = 0; + for (ll j = max(pos, i - base_digits + 1); j <= i; j++) + x = x * 10 + s[j] - '0'; + a.push_back(x); + } + trim(); + } + + friend istream& operator>>(istream& stream, bigint& v) { + string s; + stream >> s; + v.read(s); + return stream; + } + + friend ostream& operator<<(ostream& stream, const bigint& v) { + if (v.sign == -1) stream << '-'; + stream << (v.a.empty() ? 
0 : v.a.back()); + for (ll i = sz(v.a) - 2; i >= 0; --i) + stream << setw(base_digits) << setfill('0') << v.a[i]; + return stream; + } + + static vll karatsubaMultiply(const vll& a, const vll& b) { + ll n = sz(a); + vll res(n + n); + if (n <= 32) { + for (ll i = 0; i < n; i++) + for (ll j = 0; j < n; j++) + res[i + j] += a[i] * b[j]; + return res; + } + ll k = n >> 1; + vll a1(a.begin(), a.begin() + k); + vll a2(a.begin() + k, a.end()); + vll b1(b.begin(), b.begin() + k); + vll b2(b.begin() + k, b.end()); + vll a1b1 = karatsubaMultiply(a1, b1); + vll a2b2 = karatsubaMultiply(a2, b2); + for (ll i = 0; i < k; i++) a2[i] += a1[i]; + for (ll i = 0; i < k; i++) b2[i] += b1[i]; + vll r = karatsubaMultiply(a2, b2); + for (ll i = 0; i < sz(a1b1); i++) r[i] -= a1b1[i]; + for (ll i = 0; i < sz(a2b2); i++) r[i] -= a2b2[i]; + for (ll i = 0; i < sz(r); i++) res[i + k] += r[i]; + for (ll i = 0; i < sz(a1b1); i++) res[i] += a1b1[i]; + for (ll i = 0; i < sz(a2b2); i++) res[i + n] += a2b2[i]; + return res; + } + + bigint operator*(const bigint& v) const { + vll ta(a.begin(), a.end()); + vll va(v.a.begin(), v.a.end()); + while (sz(ta) < sz(va)) ta.push_back(0); + while (sz(va) < sz(ta)) va.push_back(0); + while (sz(ta) & (sz(ta) - 1)) + ta.push_back(0), va.push_back(0); + vll ra = karatsubaMultiply(ta, va); + bigint res; + res.sign = sign * v.sign; + for (ll i = 0, carry = 0; i < sz(ra); i++) { + ll cur = ra[i] + carry; + res.a.push_back(cur % base); + carry = cur / base; + } + res.trim(); + return res; + } +}; diff --git a/content/math/binomial0.cpp b/content/math/binomial0.cpp new file mode 100644 index 0000000..5f2ccaa --- /dev/null +++ b/content/math/binomial0.cpp @@ -0,0 +1,14 @@ +constexpr ll lim = 10'000'000; +ll fac[lim], inv[lim]; + +void precalc() { + fac[0] = inv[0] = 1; + for (int i = 1; i < lim; i++) fac[i] = fac[i-1] * i % mod; + inv[lim - 1] = multInv(fac[lim - 1], mod); + for (int i = lim - 1; i > 0; i--) inv[i-1] = inv[i] * i % mod; +} + +ll calc_binom(ll n, ll k) { + if (n < 0 || n < k || k < 0) return 0; + return (inv[k] * inv[n-k] % mod) * fac[n] % mod; +} diff --git a/content/math/binomial1.cpp b/content/math/binomial1.cpp new file mode 100644 index 0000000..dab20b3 --- /dev/null +++ b/content/math/binomial1.cpp @@ -0,0 +1,8 @@ +ll calc_binom(ll n, ll k) { + if (k > n) return 0; + ll r = 1; + for (ll d = 1; d <= k; d++) {// Reihenfolge => Teilbarkeit + r *= n--, r /= d; + } + return r; +} diff --git a/content/math/binomial2.cpp b/content/math/binomial2.cpp new file mode 100644 index 0000000..4531505 --- /dev/null +++ b/content/math/binomial2.cpp @@ -0,0 +1,32 @@ +constexpr ll mod = 1'000'000'009; + +ll binomPPow(ll n, ll k, ll p) { + ll res = 1; + if (p > n) { + } else if (p > n - k || (p * p > n && n % p < k % p)) { + res *= p; + res %= mod; + } else if (p * p <= n) { + ll c = 0, tmpN = n, tmpK = k; + while (tmpN > 0) { + if (tmpN % p < tmpK % p + c) { + res *= p; + res %= mod; + c = 1; + } else c = 0; + tmpN /= p; + tmpK /= p; + }} + return res; +} + +ll calc_binom(ll n, ll k) { + if (k > n) return 0; + ll res = 1; + k = min(k, n - k); + for (ll i = 0; primes[i] <= n; i++) { + res *= binomPPow(n, k, primes[i]); + res %= mod; + } + return res; +} diff --git a/content/math/binomial3.cpp b/content/math/binomial3.cpp new file mode 100644 index 0000000..7a6ab4e --- /dev/null +++ b/content/math/binomial3.cpp @@ -0,0 +1,10 @@ +ll calc_binom(ll n, ll k, ll p) { + assert(n < p); //wichtig: sonst falsch! + if (k > n) return 0; + ll x = k % 2 != 0 ? 
p-1 : 1; + for (ll c = p-1; c > n; c--) { + x *= c - k; x %= p; + x *= multInv(c, p); x %= p; + } + return x; +} diff --git a/content/math/chineseRemainder.cpp b/content/math/chineseRemainder.cpp new file mode 100644 index 0000000..ccbc5dc --- /dev/null +++ b/content/math/chineseRemainder.cpp @@ -0,0 +1,14 @@ +struct CRT { + using lll = __int128; + lll M = 1, sol = 0; // Solution unique modulo M + bool hasSol = true; + + // Adds congruence x = a (mod m) + void add(ll a, ll m) { + auto [d, s, t] = extendedEuclid(M, m); + if((a - sol) % d != 0) hasSol = false; + lll z = M/d * s; + M *= m/d; + sol = (z % M * (a-sol) % M + sol + M) % M; + } +}; diff --git a/content/math/cycleDetection.cpp b/content/math/cycleDetection.cpp new file mode 100644 index 0000000..5e68c0c --- /dev/null +++ b/content/math/cycleDetection.cpp @@ -0,0 +1,18 @@ +pair cycleDetection(ll x0, function f) { + ll a = x0, b = f(x0), length = 1; + for (ll power = 1; a != b; b = f(b), length++) { + if (power == length) { + power *= 2; + length = 0; + a = b; + }} + ll start = 0; + a = x0; b = x0; + for (ll i = 0; i < length; i++) b = f(b); + while (a != b) { + a = f(a); + b = f(b); + start++; + } + return {start, length}; +} diff --git a/content/math/discreteLogarithm.cpp b/content/math/discreteLogarithm.cpp new file mode 100644 index 0000000..68866e0 --- /dev/null +++ b/content/math/discreteLogarithm.cpp @@ -0,0 +1,17 @@ +ll dlog(ll a, ll b, ll m) { //a > 0! + ll bound = sqrtl(m) + 1; //memory usage bound < p + vector> vals(bound); + for (ll i = 0, e = 1; i < bound; i++, e = (e * a) % m) { + vals[i] = {e, i}; + } + vals.emplace_back(m, 0); + sort(all(vals)); + ll fact = powMod(a, m - bound - 1, m); + + for (ll i = 0; i < m; i += bound, b = (b * fact) % m) { + auto it = lower_bound(all(vals), pair{b, 0}); + if (it->first == b) { + return (i + it->second) % m; + }} + return -1; +} diff --git a/content/math/discreteNthRoot.cpp b/content/math/discreteNthRoot.cpp new file mode 100644 index 0000000..403cb3b --- /dev/null +++ b/content/math/discreteNthRoot.cpp @@ -0,0 +1,5 @@ +ll root(ll a, ll b, ll m) { // a > 0! + ll g = findPrimitive(m); + ll c = dlog(powMod(g, a, m), b, m); + return c < 0 ? -1 : powMod(g, c, m); +} diff --git a/content/math/divisors.cpp b/content/math/divisors.cpp new file mode 100644 index 0000000..5afd4fb --- /dev/null +++ b/content/math/divisors.cpp @@ -0,0 +1,11 @@ +ll countDivisors(ll n) { + ll res = 1; + for (ll i = 2; i * i * i <= n; i++) { + ll c = 0; + while (n % i == 0) {n /= i; c++;} + res *= c + 1; + } + if (isPrime(n)) res *= 2; + else if (n > 1) res *= isSquare(n) ? 
3 : 4; + return res; +} diff --git a/content/math/extendedEuclid.cpp b/content/math/extendedEuclid.cpp new file mode 100644 index 0000000..ecf4a16 --- /dev/null +++ b/content/math/extendedEuclid.cpp @@ -0,0 +1,6 @@ +// a*x + b*y = ggt(a, b) +array extendedEuclid(ll a, ll b) { + if (a == 0) return {b, 0, 1}; + auto [d, x, y] = extendedEuclid(b % a, a); + return {d, y - (b / a) * x, x}; +} diff --git a/content/math/gauss.cpp b/content/math/gauss.cpp new file mode 100644 index 0000000..8129fd2 --- /dev/null +++ b/content/math/gauss.cpp @@ -0,0 +1,36 @@ +void normalLine(int line) { + double factor = mat[line][line]; + for (double& x : mat[line]) x /= factor; +} + +void takeAll(int n, int line) { + for (int i = 0; i < n; i++) { + if (i == line) continue; + double diff = mat[i][line]; + for (int j = 0; j < sz(mat[i]); j++) { + mat[i][j] -= diff * mat[line][j]; +}}} + +int gauss(int n) { + vector done(n, false); + for (int i = 0; i < n; i++) { + int swappee = i; // Sucht Pivotzeile für bessere Stabilität. + for (int j = 0; j < n; j++) { + if (done[j]) continue; + if (abs(mat[j][i]) > abs(mat[i][i])) swappee = j; + } + swap(mat[i], mat[swappee]); + if (abs(mat[i][i]) > EPS) { + normalLine(i); + takeAll(n, i); + done[i] = true; + }} + // Ab jetzt nur checks bzgl. Eindeutigkeit/Existenz der Lösung. + for (int i = 0; i < n; i++) { + bool allZero = true; + for (int j = i; j < n; j++) allZero &= abs(mat[i][j]) <= EPS; + if (allZero && abs(mat[i][n]) > EPS) return INCONSISTENT; + if (allZero && abs(mat[i][n]) <= EPS) return MULTIPLE; + } + return UNIQUE; +} diff --git a/content/math/gcd-lcm.cpp b/content/math/gcd-lcm.cpp new file mode 100644 index 0000000..a1c63c8 --- /dev/null +++ b/content/math/gcd-lcm.cpp @@ -0,0 +1,2 @@ +ll gcd(ll a, ll b) {return b == 0 ? a : gcd(b, a % b);} +ll lcm(ll a, ll b) {return a * (b / gcd(a, b));} diff --git a/content/math/goldenSectionSearch.cpp b/content/math/goldenSectionSearch.cpp new file mode 100644 index 0000000..28ee4c3 --- /dev/null +++ b/content/math/goldenSectionSearch.cpp @@ -0,0 +1,15 @@ +template +ld gss(ld l, ld r, F&& f) { + ld inv = (sqrt(5.0l) - 1) / 2; + ld x1 = r - inv*(r-l), x2 = l + inv*(r-l); + ld f1 = f(x1), f2 = f(x2); + for (int i = 0; i < 200; i++) { + if (f1 < f2) { //change to > to find maximum + r = x2; x2 = x1; f2 = f1; + x1 = r - inv*(r-l); f1 = f(x1); + } else { + l = x1; x1 = x2; f1 = f2; + x2 = l + inv*(r-l); f2 = f(x2); + }} + return l; +} diff --git a/content/math/inversions.cpp b/content/math/inversions.cpp new file mode 100644 index 0000000..9e47f9b --- /dev/null +++ b/content/math/inversions.cpp @@ -0,0 +1,9 @@ +ll inversions(const vector& v) { + Tree> t; //ordered statistics tree @\sourceref{datastructures/pbds.cpp}@ + ll res = 0; + for (ll i = 0; i < sz(v); i++) { + res += i - t.order_of_key({v[i], i}); + t.insert({v[i], i}); + } + return res; +} diff --git a/content/math/inversionsMerge.cpp b/content/math/inversionsMerge.cpp new file mode 100644 index 0000000..8235b11 --- /dev/null +++ b/content/math/inversionsMerge.cpp @@ -0,0 +1,27 @@ +// Laufzeit: O(n*log(n)) +ll merge(vector& v, vector& left, vector& right) { + int a = 0, b = 0, i = 0; + ll inv = 0; + while (a < sz(left) && b < sz(right)) { + if (left[a] < right[b]) v[i++] = left[a++]; + else { + inv += sz(left) - a; + v[i++] = right[b++]; + } + } + while (a < sz(left)) v[i++] = left[a++]; + while (b < sz(right)) v[i++] = right[b++]; + return inv; +} + +ll mergeSort(vector &v) { // Sortiert v und gibt Inversionszahl zurück. 
+ int n = sz(v); + vector left(n / 2), right((n + 1) / 2); + for (int i = 0; i < n / 2; i++) left[i] = v[i]; + for (int i = n / 2; i < n; i++) right[i - n / 2] = v[i]; + + ll result = 0; + if (sz(left) > 1) result += mergeSort(left); + if (sz(right) > 1) result += mergeSort(right); + return result + merge(v, left, right); +} diff --git a/content/math/kthperm.cpp b/content/math/kthperm.cpp new file mode 100644 index 0000000..504f09c --- /dev/null +++ b/content/math/kthperm.cpp @@ -0,0 +1,14 @@ +vector kthperm(ll n, ll k) { + Tree t; + vector res(n); + for (ll i = 1; i <= n; k /= i, i++) { + t.insert(i - 1); + res[n - i] = k % i; + } + for (ll& x : res) { + auto it = t.find_by_order(x); + x = *it; + t.erase(it); + } + return res; +} diff --git a/content/math/legendre.cpp b/content/math/legendre.cpp new file mode 100644 index 0000000..b85ea2a --- /dev/null +++ b/content/math/legendre.cpp @@ -0,0 +1,4 @@ +ll legendre(ll a, ll p) { // p prim >= 2 + ll s = powMod(a, p / 2, p); + return s < 2 ? s : -1ll; +} diff --git a/content/math/lgsFp.cpp b/content/math/lgsFp.cpp new file mode 100644 index 0000000..0241742 --- /dev/null +++ b/content/math/lgsFp.cpp @@ -0,0 +1,26 @@ +void normalLine(int line, ll p) { + ll factor = multInv(mat[line][line], p); + for (ll& x : mat[line]) x = (x * factor) % p; +} + +void takeAll(int n, int line, ll p) { + for (int i = 0; i < n; i++) { + if (i == line) continue; + ll diff = mat[i][line]; + for (int j = 0; j < sz(mat[i]); j++) { + mat[i][j] -= (diff * mat[line][j]) % p; + mat[i][j] = (mat[i][j] + p) % p; +}}} + +void gauss(int n, ll mod) { + vector done(n, false); + for (int i = 0; i < n; i++) { + int j = 0; + while (j < n && (done[j] || mat[j][i] == 0)) j++; + if (j == n) continue; + swap(mat[i], mat[j]); + normalLine(i, mod); + takeAll(n, i, mod); + done[i] = true; +}} +// für Eindeutigkeit, Existenz etc. 
siehe LGS über R @\sourceref{math/gauss.cpp}@ diff --git a/content/math/linearCongruence.cpp b/content/math/linearCongruence.cpp new file mode 100644 index 0000000..cdb5a37 --- /dev/null +++ b/content/math/linearCongruence.cpp @@ -0,0 +1,5 @@ +ll solveLinearCongruence(ll a, ll b, ll m) { + ll g = gcd(a, m); + if (b % g != 0) return -1; + return ((b / g) * multInv(a / g, m / g)) % (m / g); +} diff --git a/content/math/linearRecurence.cpp b/content/math/linearRecurence.cpp new file mode 100644 index 0000000..2501e64 --- /dev/null +++ b/content/math/linearRecurence.cpp @@ -0,0 +1,33 @@ +constexpr ll mod = 1'000'000'007; +vector modMul(const vector& a, const vector& b, + const vector& c) { + ll n = sz(c); + vector res(n * 2 + 1); + for (int i = 0; i <= n; i++) { //a*b + for (int j = 0; j <= n; j++) { + res[i + j] += a[i] * b[j]; + res[i + j] %= mod; + }} + for (int i = 2 * n; i > n; i--) { //res%c + for (int j = 0; j < n; j++) { + res[i - 1 - j] += res[i] * c[j]; + res[i - 1 - j] %= mod; + }} + res.resize(n + 1); + return res; +} + +ll kthTerm(const vector& f, const vector& c, ll k) { + assert(sz(f) == sz(c)); + vector tmp(sz(c) + 1), a(sz(c) + 1); + tmp[0] = a[1] = 1; //tmp = (x^k) % c + + for (k++; k > 0; k /= 2) { + if (k & 1) tmp = modMul(tmp, a, c); + a = modMul(a, a, c); + } + + ll res = 0; + for (int i = 0; i < sz(c); i++) res += (tmp[i+1] * f[i]) % mod; + return res % mod; +} diff --git a/content/math/linearSieve.cpp b/content/math/linearSieve.cpp new file mode 100644 index 0000000..64440dd --- /dev/null +++ b/content/math/linearSieve.cpp @@ -0,0 +1,50 @@ +constexpr ll N = 10'000'000; +ll small[N], power[N], sieved[N]; +vector primes; + +//wird aufgerufen mit (p^k, p, k) für prime p und k > 0 +ll mu(ll pk, ll p, ll k) {return -(k == 1);} +ll phi(ll pk, ll p, ll k) {return pk - pk / p;} +ll div(ll pk, ll p, ll k) {return k+1;} +ll divSum(ll pk, ll p, ll k) {return (pk*p-1) / (p - 1);} +ll square(ll pk, ll p, ll k) {return k % 2 ? pk / p : pk;} +ll squareFree(ll pk, ll p, ll k) {return p;} + +void sieve() { // O(N) + small[1] = power[1] = sieved[1] = 1; + for (ll i = 2; i < N; i++) { + if (small[i] == 0) { + primes.push_back(i); + for (ll pk = i, k = 1; pk < N; pk *= i, k++) { + small[pk] = i; + power[pk] = pk; + sieved[pk] = mu(pk, i, k); // Aufruf ändern! + }} + for (ll j=0; i*primes[j] < N && primes[j] < small[i]; j++) { + ll k = i * primes[j]; + small[k] = power[k] = primes[j]; + sieved[k] = sieved[i] * sieved[primes[j]]; + } + if (i * small[i] < N && power[i] != i) { + ll k = i * small[i]; + small[k] = small[i]; + power[k] = power[i] * small[i]; + sieved[k] = sieved[power[k]] * sieved[k / power[k]]; +}}} + +ll naive(ll n) { // O(sqrt(n)) + ll res = 1; + for (ll p = 2; p * p <= n; p++) { + if (n % p == 0) { + ll pk = 1; + ll k = 0; + do { + n /= p; + pk *= p; + k++; + } while (n % p == 0); + res *= mu(pk, p, k); // Aufruf ändern! + }} + if (n > 1) res *= mu(n, n, 1); + return res; +} diff --git a/content/math/longestIncreasingSubsequence.cpp b/content/math/longestIncreasingSubsequence.cpp new file mode 100644 index 0000000..fcb63b4 --- /dev/null +++ b/content/math/longestIncreasingSubsequence.cpp @@ -0,0 +1,17 @@ +vector lis(vector& a) { + int n = sz(a), len = 0; + vector dp(n, INF), dp_id(n), prev(n); + for (int i = 0; i < n; i++) { + int pos = lower_bound(all(dp), a[i]) - dp.begin(); + dp[pos] = a[i]; + dp_id[pos] = i; + prev[i] = pos ? 
dp_id[pos - 1] : -1; + len = max(len, pos + 1); + } + // reconstruction + vector res(len); + for (int x = dp_id[len-1]; len--; x = prev[x]) { + res[len] = x; + } + return res; // indices of one LIS +} diff --git a/content/math/math.tex b/content/math/math.tex new file mode 100644 index 0000000..f99d0d4 --- /dev/null +++ b/content/math/math.tex @@ -0,0 +1,525 @@ +\section{Mathe} + +\begin{algorithm}{Longest Increasing Subsequence} + \begin{itemize} + \item \code{lower\_bound} $\Rightarrow$ streng monoton + \item \code{upper\_bound} $\Rightarrow$ monoton + \end{itemize} + \sourcecode{math/longestIncreasingSubsequence.cpp} +\end{algorithm} +\vfill\null\columnbreak + +\begin{algorithm}{Zykel Erkennung} + \begin{methods} + \method{cycleDetection}{findet Zyklus von $x_0$ und Länge in $f$}{b+l} + \end{methods} + \sourcecode{math/cycleDetection.cpp} +\end{algorithm} + +\begin{algorithm}{Permutationen} + \begin{methods} + \method{kthperm}{findet $k$-te Permutation \big($k \in [0, n!$)\big)}{n\*\log(n)} + \end{methods} + \sourcecode{math/kthperm.cpp} + \begin{methods} + \method{permIndex}{bestimmt Index der Permutation \big($\mathit{res} \in [0, n!$)\big)}{n\*\log(n)} + \end{methods} + \sourcecode{math/permIndex.cpp} +\end{algorithm} +\clearpage + +\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$} +%\vspace{-1.25em} +%\begin{multicols}{2} +\method{mulMod}{berechnet $a \cdot b \bmod n$}{\log(b)} +\sourcecode{math/modMulIterativ.cpp} +% \vfill\null\columnbreak +\method{powMod}{berechnet $a^b \bmod n$}{\log(b)} +\sourcecode{math/modPowIterativ.cpp} +%\end{multicols} +%\vspace{-2.75em} +\begin{itemize} + \item für $a > 10^9$ \code{__int128} oder \code{modMul} benutzten! +\end{itemize} + +\begin{algorithm}{ggT, kgV, erweiterter euklidischer Algorithmus} + \runtime{\log(a) + \log(b)} + \sourcecode{math/extendedEuclid.cpp} +\end{algorithm} + +\subsection{Multiplikatives Inverses von $\boldsymbol{x}$ in $\boldsymbol{\mathbb{Z}/m\mathbb{Z}}$} +\textbf{Falls $\boldsymbol{m}$ prim:}\quad $x^{-1} \equiv x^{m-2} \bmod m$ + +\textbf{Falls $\boldsymbol{\ggT(x, m) = 1}$:} +\begin{itemize} + \item Erweiterter euklidischer Algorithmus liefert $\alpha$ und $\beta$ mit + $\alpha x + \beta m = 1$. + \item Nach Kongruenz gilt $\alpha x + \beta m \equiv \alpha x \equiv 1 \bmod m$. + \item $x^{-1} :\equiv \alpha \bmod m$ +\end{itemize} +\textbf{Sonst $\boldsymbol{\ggT(x, m) > 1}$:}\quad Es existiert kein $x^{-1}$. +% \sourcecode{math/multInv.cpp} +\sourcecode{math/shortModInv.cpp} + +\paragraph{Lemma von \textsc{Bézout}} +Sei $(x, y)$ eine Lösung der diophantischen Gleichung $ax + by = d$. +Dann lassen sich wie folgt alle Lösungen berechnen: +\[ +\left(x + k\frac{b}{\ggT(a, b)},~y - k\frac{a}{\ggT(a, b)}\right) +\] + +\paragraph{\textsc{Pell}-Gleichungen} +Sei $(\overline{x}, \overline{y})$ die Lösung von $x^2 - ny^2 = 1$, die $x>1$ minimiert. +Sei $(\tilde{x}, \tilde{y})$ die Lösung von $x^2-ny^2 = c$, die $x>1$ minimiert. Dann lassen +sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: +\begin{align*} + x_1&\coloneqq \tilde{x}, & y_1&\coloneqq\tilde{y}\\ + x_{k+1}&\coloneqq \overline{x}x_k+n\overline{y}y_k, & y_{k+1}&\coloneqq\overline{x}y_k+\overline{y}x_k +\end{align*} + +\begin{algorithm}{Lineare Kongruenz} + \begin{itemize} + \item Kleinste Lösung $x$ für $ax\equiv b\pmod{m}$. + \item Weitere Lösungen unterscheiden sich um \raisebox{2pt}{$\frac{m}{g}$}, es gibt + also $g$ Lösungen modulo $m$. 
+ \end{itemize} + \sourcecode{math/linearCongruence.cpp} +\end{algorithm} + +\begin{algorithm}{Chinesischer Restsatz} + \begin{itemize} + \item Extrem anfällig gegen Overflows. Evtl. häufig 128-Bit Integer verwenden. + \item Direkte Formel für zwei Kongruenzen $x \equiv a \bmod n$, $x \equiv b \bmod m$: + \[ + x \equiv a - y \cdot n \cdot \frac{a - b}{d} \bmod \frac{mn}{d} + \qquad \text{mit} \qquad + d := \ggT(n, m) = yn + zm + \] + Formel kann auch für nicht teilerfremde Moduli verwendet werden. + Sind die Moduli nicht teilerfremd, existiert genau dann eine Lösung, + wenn $a\equiv~b \bmod \ggT(m, n)$. + In diesem Fall sind keine Faktoren + auf der linken Seite erlaubt. + \end{itemize} + \sourcecode{math/chineseRemainder.cpp} +\end{algorithm} + +\begin{algorithm}{Primzahltest \& Faktorisierung} + \method{isPrime}{prüft ob Zahl prim ist}{\log(n)^2} + \sourcecode{math/millerRabin.cpp} + \method{rho}{findet zufälligen Teiler}{\sqrt[\leftroot{3}\uproot{2}4]{n}} + \sourcecode{math/rho.cpp} + %\method{squfof}{findet zufälligen Teiler}{\sqrt[\leftroot{4}\uproot{2}4]{n}} + %\sourcecode{math/squfof.cpp} +\end{algorithm} + +\begin{algorithm}{Teiler} + \begin{methods} + \method{countDivisors}{Zählt Teiler von $n$}{\sqrt[\leftroot{3}\uproot{2}3]{n}} + \end{methods} + \sourcecode{math/divisors.cpp} +\end{algorithm} + +\begin{algorithm}{Matrix-Exponent} + \begin{methods} + \method{precalc}{berechnet $m^{2^b}$ vor}{\log(b)\*n^3} + \method{calc}{berechnet $m^b\cdot$}{\log(b)\cdot n^2} + \end{methods} + \textbf{Tipp:} wenn \code{v[x]=1} und \code{0} sonst, dann ist \code{res[y]} = $m^b_{y,x}$. + \sourcecode{math/matrixPower.cpp} +\end{algorithm} + +\begin{algorithm}{Lineare Rekurrenz} + \begin{methods} + \method{BerlekampMassey}{Berechnet eine lineare Rekurrenz $n$-ter Ordnung}{n^2} + \method{}{aus den ersten $2n$ Werte}{} + \end{methods} + \sourcecode{math/berlekampMassey.cpp} + Sei $f(n)=c_{0}f(n-1)+c_{1}f(n-2)+\dots + c_{n-1}f(0)$ eine lineare Rekurrenz. + + \begin{methods} + \method{kthTerm}{Berechnet $k$-ten Term einer Rekurrenz $n$-ter Ordnung}{\log(k)\cdot n^2} + \end{methods} + \sourcecode{math/linearRecurence.cpp} + Alternativ kann der \mbox{$k$-te} Term in \runtime{n^3\log(k)} berechnet werden: + $$\renewcommand\arraystretch{1.5} + \setlength\arraycolsep{3pt} + \begin{pmatrix} + c_{0} & c_{1} & \smash{\cdots} & \smash{\cdots} & c_{n-1} \\ + 1 & 0 & \smash{\cdots} & \smash{\cdots} & 0 \\ + 0 & \smash{\ddots} & \smash{\ddots} & & \smash{\vdots} \\ + \smash{\vdots} & \smash{\ddots} & \smash{\ddots} & \smash{\ddots} & \smash{\vdots} \\ + 0 & \smash{\cdots} & 0 & 1 & 0 \\ + \end{pmatrix}^k + \times~~ + \begin{pmatrix} + f(n-1) \\ + f(n-2) \\ + \smash{\vdots} \\ + \smash{\vdots} \\ + f(0) \\ + \end{pmatrix} + ~~=~~ + \begin{pmatrix} + f(n-1+k) \\ + f(n-2+k) \\ + \smash{\vdots} \\ + \smash{\vdots} \\ + f(k) \makebox[0pt][l]{\hspace{15pt}$\vcenter{\hbox{\huge$\leftarrow$}}$}\\ + \end{pmatrix} + $$ +\end{algorithm} + +\begin{algorithm}{Diskreter Logarithmus} + \begin{methods} + \method{solve}{bestimmt Lösung $x$ für $a^x=b \bmod m$}{\sqrt{m}\*\log(m)} + \end{methods} + \sourcecode{math/discreteLogarithm.cpp} +\end{algorithm} + +\begin{algorithm}{Diskrete Quadratwurzel} + \begin{methods} + \method{sqrtMod}{bestimmt Lösung $x$ für $x^2=a \bmod p$ }{\log(p)} + \end{methods} + \textbf{Wichtig:} $p$ muss prim sein! 
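+ Example: for $a = 2$, $p = 7$ a solution exists ($2$ is a quadratic residue mod $7$), and $x = 3$ (or $x = 4$) works since $3^2 = 9 \equiv 2 \pmod{7}$.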
+ \sourcecode{math/sqrtModCipolla.cpp} +\end{algorithm} +%\columnbreak + +\begin{algorithm}{Primitivwurzeln} + \begin{itemize} + \item Primitivwurzel modulo $n$ existiert $\Leftrightarrow$ $n \in \{2,\ 4,\ p^\alpha,\ 2\cdot p^\alpha \mid\ 2 < p \in \mathbb{P},\ \alpha \in \mathbb{N}\}$ + \item es existiert entweder keine oder $\varphi(\varphi(n))$ inkongruente Primitivwurzeln + \item Sei $g$ Primitivwurzel modulo $n$. + Dann gilt:\newline + Das kleinste $k$, sodass $g^k \equiv 1 \bmod n$, ist $k = \varphi(n)$. + \end{itemize} + \begin{methods} + \method{isPrimitive}{prüft ob $g$ eine Primitivwurzel ist}{\log(\varphi(n))\*\log(n)} + \method{findPrimitive}{findet Primitivwurzel (oder -1)}{\abs{ans}\*\log(\varphi(n))\*\log(n)} + \end{methods} + \sourcecode{math/primitiveRoot.cpp} +\end{algorithm} + +\begin{algorithm}{Diskrete \textrm{\textit{n}}-te Wurzel} + \begin{methods} + \method{root}{bestimmt Lösung $x$ für $x^a=b \bmod m$ }{\sqrt{m}\*\log(m)} + \end{methods} + Alle Lösungen haben die Form $g^{c + \frac{i \cdot \phi(n)}{\gcd(a, \phi(n))}}$ + \sourcecode{math/discreteNthRoot.cpp} +\end{algorithm} + +\begin{algorithm}{\textsc{Legendre}-Symbol} + Sei $p \geq 3$ eine Primzahl, $a \in \mathbb{Z}$: + \vspace{-0.15cm}\begin{align*} + \hspace*{3cm}\legendre{a}{p} &= + \begin{cases*} + \hphantom{-}0 & falls $p~\vert~a$ \\[-1ex] + \hphantom{-}1 & falls $\exists x \in \mathbb{Z}\backslash p\mathbb{Z} : a \equiv x^2 \bmod p$ \\[-1ex] + -1 & sonst + \end{cases*} \\ + \legendre{-1}{p} = (-1)^{\frac{p - 1}{2}} &= + \begin{cases*} + \hphantom{-}1 & falls $p \equiv 1 \bmod 4$ \\[-1ex] + -1 & falls $p \equiv 3 \bmod 4$ + \end{cases*} \\ + \legendre{2}{p} = (-1)^{\frac{p^2 - 1}{8}} &= + \begin{cases*} + \hphantom{-}1 & falls $p \equiv \pm 1 \bmod 8$ \\[-1ex] + -1 & falls $p \equiv \pm 3 \bmod 8$ + \end{cases*} + \end{align*} + \begin{align*} + \legendre{p}{q} \cdot \legendre{q}{p} = (-1)^{\frac{p - 1}{2} \cdot \frac{q - 1}{2}} && + \legendre{a}{p} \equiv a^{\frac{p-1}{2}}\bmod p + \end{align*} + \vspace{-0.05cm} + \sourcecode{math/legendre.cpp} +\end{algorithm} + +\begin{algorithm}{Lineares Sieb und Multiplikative Funktionen} + Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$. + + $\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen. + + \begin{methods} + \method{sieve}{berechnet Primzahlen und co.}{N} + \method{sieved}{Wert der entsprechenden multiplikativen Funktion}{1} + + \method{naive}{Wert der entsprechenden multiplikativen Funktion}{\sqrt{n}} + \end{methods} + \textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}! + + \sourcecode{math/linearSieve.cpp} + \textbf{\textsc{Möbius}-Funktion:} + \begin{itemize} + \item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat + \item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat + \item $\mu(n)=0$, falls $n$ nicht quadratfrei ist + \end{itemize} + + \textbf{\textsc{Euler}sche $\boldsymbol{\varphi}$-Funktion:} + \begin{itemize} + \item Zählt die relativ primen Zahlen $\leq n$. + \item $p$ prim, $k \in \mathbb{N}$: + $~\varphi(p^k) = p^k - p^{k - 1}$ + + \item \textbf{Euler's Theorem:} + Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$. 
+ Falls $m$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}: + $a^{m} \equiv a \pmod{m}$ + \end{itemize} +\end{algorithm} + +\begin{algorithm}{Primzahlsieb von \textsc{Eratosthenes}} + \begin{itemize} + \item Bis $10^8$ in unter 64MB Speicher (lange Berechnung) + \end{itemize} + \begin{methods} + \method{primeSieve}{berechnet Primzahlen und Anzahl}{N\*\log(\log(N))} + \method{isPrime}{prüft ob Zahl prim ist}{1} + \end{methods} + \sourcecode{math/primeSieve.cpp} +\end{algorithm} + +\begin{algorithm}{\textsc{Möbius}-Inversion} + \begin{itemize} + \item Seien $f,g : \mathbb{N} \to \mathbb{N}$ und $g(n) := \sum_{d \vert n}f(d)$. + Dann ist $f(n) = \sum_{d \vert n}g(d)\mu(\frac{n}{d})$. + \item $\sum\limits_{d \vert n}\mu(d) = + \begin{cases*} + 1 & falls $n = 1$\\ + 0 & sonst + \end{cases*}$ + \end{itemize} + \textbf{Beispiel Inklusion/Exklusion:} + Gegeben sein eine Sequenz $A={a_1,\ldots,a_n}$ von Zahlen, $1 \leq a_i \leq N$. Zähle die Anzahl der \emph{coprime subsequences}.\newline + \textbf{Lösung}: + Für jedes $x$, sei $cnt[x]$ die Anzahl der Vielfachen von $x$ in $A$. + Es gibt $2^{[x]}-1$ nicht leere Subsequences in $A$, die nur Vielfache von $x$ enthalten. + Die Anzahl der Subsequences mit $\ggT=1$ ist gegeben durch $\sum_{i = 1}^N \mu(i) \cdot (2^{cnt[i]} - 1)$. +\end{algorithm} + +\subsection{LGS über $\boldsymbol{\mathbb{F}_p}$} +\method{gauss}{löst LGS}{n^3} +\sourcecode{math/lgsFp.cpp} + +\subsection{LGS über $\boldsymbol{\mathbb{R}}$} +\method{gauss}{löst LGS}{n^3} +\sourcecode{math/gauss.cpp} + +\vfill\null\columnbreak + +\begin{algorithm}{Numerisch Extremstelle bestimmen} + \sourcecode{math/goldenSectionSearch.cpp} +\end{algorithm} + +\begin{algorithm}{Numerisch Integrieren, Simpsonregel} + \sourcecode{math/simpson.cpp} +\end{algorithm} + + +\begin{algorithm}{Polynome, FFT, NTT \& andere Transformationen} + Multipliziert Polynome $A$ und $B$. + \begin{itemize} + \item $\deg(A \cdot B) = \deg(A) + \deg(B)$ + \item Vektoren \code{a} und \code{b} müssen mindestens Größe + $\deg(A \cdot B) + 1$ haben. + Größe muss eine Zweierpotenz sein. + \item Für ganzzahlige Koeffizienten: \code{(ll)round(real(a[i]))} + \item \emph{xor}, \emph{or} und \emph{and} Transform funktioniert auch mit \code{double} oder modulo einer Primzahl $p$ falls $p \geq 2^{\texttt{bits}}$ + \end{itemize} + %\sourcecode{math/fft.cpp} + %\sourcecode{math/ntt.cpp} + \sourcecode{math/transforms/fft.cpp} + \sourcecode{math/transforms/ntt.cpp} + \sourcecode{math/transforms/bitwiseTransforms.cpp} + Multiplikation mit 2 transforms statt 3: (nur benutzten wenn nötig!) + \sourcecode{math/transforms/fftMul.cpp} +\end{algorithm} + +\begin{algorithm}{Operations on Formal Power Series} + \sourcecode{math/transforms/seriesOperations.cpp} +\end{algorithm} + +\begin{algorithm}{Inversionszahl} + \sourcecode{math/inversions.cpp} +\end{algorithm} + +\subsection{Satz von \textsc{Sprague-Grundy}} +Weise jedem Zustand $X$ wie folgt eine \textsc{Grundy}-Zahl $g\left(X\right)$ zu: +\[ +g\left(X\right) := \min\left\{ +\mathbb{Z}_0^+ \setminus +\left\{g\left(Y\right) \mid Y \text{ von } X \text{ aus direkt erreichbar}\right\} +\right\} +\] +$X$ ist genau dann gewonnen, wenn $g\left(X\right) > 0$ ist.\\ +Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \textsc{Grundy}-Zahl des Gesamtzustandes $g\left(X_1\right) \oplus \ldots \oplus g\left(X_k\right)$. 
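+
+Example: in the subtraction game with moves $M = \{1, 2, 3\}$ the values are $g(n) = n \bmod 4$; for two independent piles of sizes $5$ and $7$ the combined value is $g(5) \oplus g(7) = 1 \oplus 3 = 2 > 0$, so the position is won.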
+ +\subsection{Kombinatorik} + +\paragraph{Wilsons Theorem} +A number $n$ is prime if and only if +$(n-1)!\equiv -1\bmod{n}$.\\ +($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$) +\begin{align*} + (n-1)!\equiv\begin{cases} + -1\bmod{n},&\mathrm{falls}~n \in \mathbb{P}\\ + \hphantom{-}2\bmod{n},&\mathrm{falls}~n = 4\\ + \hphantom{-}0\bmod{n},&\mathrm{sonst} + \end{cases} +\end{align*} + +\paragraph{\textsc{Zeckendorfs} Theorem} +Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer +verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei +aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\ +\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch +hineinpasst. + +\paragraph{\textsc{Lucas}-Theorem} +Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung), +so gilt +\vspace{-0.75\baselineskip} +\[ + \binom{m}{n} \equiv \prod_{i=0}^k\binom{m_i}{n_i} \bmod{p}. +\] + +%\begin{algorithm}{Binomialkoeffizienten} +\paragraph{Binomialkoeffizienten} + Die Anzahl der \mbox{$k$-elementigen} Teilmengen einer \mbox{$n$-elementigen} Menge. + + \begin{methods} + \method{precalc}{berechnet $n!$ und $n!^{-1}$ vor}{\mathit{lim}} + \method{calc\_binom}{berechnet Binomialkoeffizient}{1} + \end{methods} + \sourcecode{math/binomial0.cpp} + Falls $n >= p$ for $\mathit{mod}=p^k$ berechne \textit{fac} und \textit{inv} aber teile $p$ aus $i$ und berechne die häufigkeit von $p$ in $n!$ als $\sum\limits_{i=1}\big\lfloor\frac{n}{p^i}\big\rfloor$ + + \begin{methods} + \method{calc\_binom}{berechnet Binomialkoeffizient $(n \le 61)$}{k} + \end{methods} + \sourcecode{math/binomial1.cpp} + + \begin{methods} + \method{calc\_binom}{berechnet Binomialkoeffizient modulo Primzahl $p$}{p-n} + \end{methods} + \sourcecode{math/binomial3.cpp} + +% \begin{methods} +% \method{calc\_binom}{berechnet Primfaktoren vom Binomialkoeffizient}{n} +% \end{methods} +% \textbf{WICHTIG:} braucht alle Primzahlen $\leq n$ +% \sourcecode{math/binomial2.cpp} +%\end{algorithm} + +\paragraph{\textsc{Catalan}-Zahlen} +\begin{itemize} + \item Die \textsc{Catalan}-Zahl $C_n$ gibt an: + \begin{itemize} + \item Anzahl der Binärbäume mit $n$ nicht unterscheidbaren Knoten. + \item Anzahl der validen Klammerausdrücke mit $n$ Klammerpaaren. + \item Anzahl der korrekten Klammerungen von $n+1$ Faktoren. + \item Anzahl Möglichkeiten ein konvexes Polygon mit $n + 2$ Ecken zu triangulieren. + \item Anzahl der monotonen Pfade (zwischen gegenüberliegenden Ecken) in + einem $n \times n$-Gitter, die nicht die Diagonale kreuzen. + \end{itemize} +\end{itemize} +\[C_0 = 1\qquad C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} = +\frac{1}{n + 1}\binom{2n}{n} = \frac{4n - 2}{n+1} \cdot C_{n-1}\] +\begin{itemize} + \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2} + \item Formel $2$ und $3$ erlauben Berechnung in \runtime{n} +\end{itemize} + +\paragraph{\textsc{Catalan}-Convolution} +\begin{itemize} + \item Anzahl an Klammerausdrücken mit $n+k$ Klammerpaaren, die mit $(^k$ beginnen. +\end{itemize} +\[C^k_0 = 1\qquad C^k_n = \sum\limits_{\mathclap{a_0+a_1+\dots+a_k=n}} C_{a_0}C_{a_1}\cdots C_{a_k} = +\frac{k+1}{n+k+1}\binom{2n+k}{n} = \frac{(2n+k-1)\cdot(2n+k)}{n(n+k+1)} \cdot C_{n-1}\] + +\paragraph{\textsc{Euler}-Zahlen 1. Ordnung} +Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Anstiegen. +Für die $n$-te Zahl gibt es $n$ mögliche Positionen zum Einfügen. 
+Dabei wird entweder ein Anstieg in zwei gesplitted oder ein Anstieg um $n$ ergänzt. +\[\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \quad +\eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1}= +\sum_{i=0}^{k} (-1)^i\binom{n+1}{i}(k+1-i)^n\] +\begin{itemize} + \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2} + \item Formel $2$ erlaubt Berechnung in \runtime{n\log(n)} +\end{itemize} + +\paragraph{\textsc{Euler}-Zahlen 2. Ordnung} +Die Anzahl der Permutationen von $\{1,1, \ldots, n,n\}$ mit genau $k$ Anstiegen. +\[\eulerII{n}{0} = 1 \qquad\eulerII{n}{n} = 0 \qquad\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}\] +\begin{itemize} + \item Formel erlaubt Berechnung ohne Division in \runtime{n^2} +\end{itemize} + +\paragraph{\textsc{Stirling}-Zahlen 1. Ordnung} +Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Zyklen. +Es gibt zwei Möglichkeiten für die $n$-te Zahl. Entweder sie bildet einen eigene Zyklus, oder sie kann an jeder Position in jedem Zyklus einsortiert werden. +\[\stirlingI{0}{0} = 1 \qquad +\stirlingI{n}{0} = \stirlingI{0}{n} = 0 \qquad +\stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}\] +\begin{itemize} + \item Formel erlaubt berechnung ohne Division in \runtime{n^2} +\end{itemize} +\[\sum_{k=0}^{n}\pm\stirlingI{n}{k}x^k=x(x-1)(x-2)\cdots(x-n+1)\] +\begin{itemize} + \item Berechne Polynom mit FFT und benutzte betrag der Koeffizienten \runtime{n\log(n)^2} (nur ungefähr gleich große Polynome zusammen multiplizieren beginnend mit $x-k$) +\end{itemize} + +\paragraph{\textsc{Stirling}-Zahlen 2. Ordnung} +Die Anzahl der Möglichkeiten $n$ Elemente in $k$ nichtleere Teilmengen zu zerlegen. +Es gibt $k$ Möglichkeiten die $n$ in eine $n-1$-Partition einzuordnen. +Dazu kommt der Fall, dass die $n$ in ihrer eigenen Teilmenge (alleine) steht. +\[\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad +\stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} = +\frac{1}{k!} \sum\limits_{i=0}^{k} (-1)^{k-i}\binom{k}{i}i^n\] +\begin{itemize} + \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2} + \item Formel $2$ erlaubt Berechnung in \runtime{n\log(n)} +\end{itemize} + +\paragraph{\textsc{Bell}-Zahlen} +Anzahl der Partitionen von $\{1, \ldots, n\}$. +Wie \textsc{Stirling}-Zahlen 2. Ordnung ohne Limit durch $k$. +\[B_1 = 1 \qquad +B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k} += \sum\limits_{k = 0}^{n}\stirlingII{n}{k}\qquad\qquad B_{p^m+n}\equiv m\cdot B_n + B_{n+1} \bmod{p}\] + +\paragraph{Partitions} +Die Anzahl der Partitionen von $n$ in genau $k$ positive Summanden. +Die Anzahl der Partitionen von $n$ mit Elementen aus ${1,\dots,k}$. +\begin{align*} + p_0(0)=1 \qquad p_k(n)&=0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0\\ + p_k(n)&= p_k(n-k) + p_{k-1}(n-1)\\[2pt] + p(n)&=\sum_{k=1}^{n} p_k(n)=p_n(2n)=\sum\limits_{k\neq0}^\infty(-1)^{k+1}p\bigg(n - \frac{k(3k-1)}{2}\bigg) +\end{align*} +\begin{itemize} + \item in Formel $3$ kann abgebrochen werden wenn $\frac{k(3k-1)}{2} > n$. + \item Die Anzahl der Partitionen von $n$ in bis zu $k$ positive Summanden ist $\sum\limits_{i=0}^{k}p_i(n)=p_k(n+k)$. 
+\end{itemize} + +\subsection{The Twelvefold Way \textnormal{(verteile $n$ Bälle auf $k$ Boxen)}} +\input{math/tables/twelvefold} + +\optional{ +\subsection{Primzahlzählfunktion $\boldsymbol{\pi}$} +\begin{methods} + \method{init}{berechnet $\pi$ bis $N$}{N\*\log(\log(N))} + \method{phi}{zählt zu $p_i$ teilerfremde Zahlen $\leq n$ für alle $i \leq k$}{???} + \method{pi}{zählt Primzahlen $\leq n$ ($n < N^2$)}{n^{2/3}} +\end{methods} +\sourcecode{math/piLehmer.cpp} +} + +%\input{math/tables/numbers} + +\begin{algorithm}[optional]{Big Integers} + \sourcecode{math/bigint.cpp} +\end{algorithm} diff --git a/content/math/matrixPower.cpp b/content/math/matrixPower.cpp new file mode 100644 index 0000000..d981e6e --- /dev/null +++ b/content/math/matrixPower.cpp @@ -0,0 +1,14 @@ +vector pows; + +void precalc(mat m) { + pows = {mat(sz(m.m), 1), m}; + for (int i = 1; i < 60; i++) pows.push_back(pows[i] * pows[i]); +} + +auto calc(ll b, vector v) { + for (ll i = 1; b > 0; i++) { + if (b & 1) v = pows[i] * v; + b /= 2; + } + return v; +} diff --git a/content/math/millerRabin.cpp b/content/math/millerRabin.cpp new file mode 100644 index 0000000..cb27d29 --- /dev/null +++ b/content/math/millerRabin.cpp @@ -0,0 +1,19 @@ +constexpr ll bases32[] = {2, 7, 61}; +constexpr ll bases64[] = {2, 325, 9375, 28178, 450775, + 9780504, 1795265022}; +bool isPrime(ll n) { + if (n < 2 || n % 2 == 0) return n == 2; + ll d = n - 1, j = 0; + while (d % 2 == 0) d /= 2, j++; + for (ll a : bases64) { + if (a % n == 0) continue; + ll v = powMod(a, d, n); //with mulmod or int128 + if (v == 1 || v == n - 1) continue; + for (ll i = 1; i <= j; i++) { + v = ((lll)v * v) % n; + if (v == n - 1 || v <= 1) break; + } + if (v != n - 1) return false; + } + return true; +} diff --git a/content/math/modExp.cpp b/content/math/modExp.cpp new file mode 100644 index 0000000..2329a94 --- /dev/null +++ b/content/math/modExp.cpp @@ -0,0 +1,6 @@ +ll powMod(ll a, ll b, ll n) { + if(b == 0) return 1; + if(b == 1) return a % n; + if(b & 1) return (powMod(a, b - 1, n) * a) % n; + else return powMod((a * a) % n, b / 2, n); +} diff --git a/content/math/modMulIterativ.cpp b/content/math/modMulIterativ.cpp new file mode 100644 index 0000000..611f09a --- /dev/null +++ b/content/math/modMulIterativ.cpp @@ -0,0 +1,9 @@ +ll mulMod(ll a, ll b, ll n) { + ll res = 0; + while (b > 0) { + if (b & 1) res = (a + res) % n; + a = (a * 2) % n; + b /= 2; + } + return res; +} diff --git a/content/math/modPowIterativ.cpp b/content/math/modPowIterativ.cpp new file mode 100644 index 0000000..0dc3fb1 --- /dev/null +++ b/content/math/modPowIterativ.cpp @@ -0,0 +1,9 @@ +ll powMod(ll a, ll b, ll n) { + ll res = 1; + while (b > 0) { + if (b & 1) res = (a * res) % n; + a = (a * a) % n; + b /= 2; + } + return res; +} diff --git a/content/math/multInv.cpp b/content/math/multInv.cpp new file mode 100644 index 0000000..647dc2d --- /dev/null +++ b/content/math/multInv.cpp @@ -0,0 +1,4 @@ +ll multInv(ll x, ll m) { + auto [d, a, b] = extendedEuclid(x, m); // Implementierung von oben. 
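+ // gcd(x, m) == 1  =>  d == 1 and a*x + b*m = 1, so a is x^{-1} modulo m.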
+ return ((a % m) + m) % m; +} diff --git a/content/math/permIndex.cpp b/content/math/permIndex.cpp new file mode 100644 index 0000000..4cffc12 --- /dev/null +++ b/content/math/permIndex.cpp @@ -0,0 +1,13 @@ +ll permIndex(vector v) { + Tree t; + reverse(all(v)); + for (ll& x : v) { + t.insert(x); + x = t.order_of_key(x); + } + ll res = 0; + for (int i = sz(v); i > 0; i--) { + res = res * i + v[i - 1]; + } + return res; +} diff --git a/content/math/piLegendre.cpp b/content/math/piLegendre.cpp new file mode 100644 index 0000000..21b974b --- /dev/null +++ b/content/math/piLegendre.cpp @@ -0,0 +1,23 @@ +constexpr ll cache = 500; // requires O(cache^3) +vector> memo(cache * cache, vector(cache)); + +ll pi(ll n); + +ll phi(ll n, ll k) { + if (n <= 1 || k < 0) return 0; + if (n <= primes[k]) return n - 1; + if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k; + bool ok = n < cache * cache; + if (ok && memo[n][k] > 0) return memo[n][k]; + ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1); + if (ok) memo[n][k] = res; + return res; +} + +ll pi(ll n) { + if (n < N) { // implement this as O(1) lookup for speedup! + return distance(primes.begin(), upper_bound(all(primes), n)); + } else { + ll k = pi(sqrtl(n) + 1); + return n - phi(n, k) + k; +}} diff --git a/content/math/piLehmer.cpp b/content/math/piLehmer.cpp new file mode 100644 index 0000000..17df85e --- /dev/null +++ b/content/math/piLehmer.cpp @@ -0,0 +1,52 @@ +constexpr ll cacheA = 2 * 3 * 5 * 7 * 11 * 13 * 17; +constexpr ll cacheB = 7; +ll memoA[cacheA + 1][cacheB + 1]; +ll memoB[cacheB + 1]; +ll memoC[N]; + +void init() { + primeSieve(); // @\sourceref{math/primeSieve.cpp}@ + for (ll i = 0; i < N; i++) { + memoC[i] = memoC[i - 1]; + if (isPrime(i)) memoC[i]++; + } + memoB[0] = 1; + for(ll i = 0; i <= cacheA; i++) memoA[i][0] = i; + for(ll i = 1; i <= cacheB; i++) { + memoB[i] = primes[i - 1] * memoB[i - 1]; + for(ll j = 1; j <= cacheA; j++) { + memoA[j][i] = memoA[j][i - 1] - memoA[j / + primes[i - 1]][i - 1]; +}}} + +ll phi(ll n, ll k) { + if(k == 0) return n; + if(k <= cacheB) + return memoA[n % memoB[k]][k] + + (n / memoB[k]) * memoA[memoB[k]][k]; + if(n <= primes[k - 1]*primes[k - 1]) return memoC[n] - k + 1; + if(n <= primes[k - 1]*primes[k - 1]*primes[k - 1] && n < N) { + ll b = memoC[(ll)sqrtl(n)]; + ll res = memoC[n] - (b + k - 2) * (b - k + 1) / 2; + for(ll i = k; i < b; i++) res += memoC[n / primes[i]]; + return res; + } + return phi(n, k - 1) - phi(n / primes[k - 1], k - 1); +} + +ll pi(ll n) { + if (n < N) return memoC[n]; + ll a = pi(sqrtl(sqrtl(n))); + ll b = pi(sqrtl(n)); + ll c = pi(cbrtl(n)); + ll res = phi(n, a) + (b + a - 2) * (b - a + 1) / 2; + for (ll i = a; i < b; i++) { + ll w = n / primes[i]; + res -= pi(w); + if (i > c) continue; + ll bi = pi(sqrtl(w)); + for (ll j = i; j < bi; j++) { + res -= pi(w / primes[j]) - j; + }} + return res; +} diff --git a/content/math/polynomial.cpp b/content/math/polynomial.cpp new file mode 100644 index 0000000..44f6207 --- /dev/null +++ b/content/math/polynomial.cpp @@ -0,0 +1,65 @@ +struct poly { + vector data; + + poly(int deg = 0) : data(max(1, deg)) {} + poly(initializer_list _data) : data(_data) {} + + int size() const {return sz(data);} + + void trim() { + for (ll& x : data) x = (x % mod + mod) % mod; + while (size() > 1 && data.back() == 0) data.pop_back(); + } + + ll& operator[](int x) {return data[x];} + const ll& operator[](int x) const {return data[x];} + + ll operator()(int x) const { + ll res = 0; + for (int i = size() - 1; i >= 0; i--) + res = (res * x 
+ data[i]) % mod; + return res % mod; + } + + poly& operator+=(const poly& o) { + if (size() < o.size()) data.resize(o.size()); + for (int i = 0; i < o.size(); i++) + data[i] = (data[i] + o[i]) % mod; + return *this; + } + + poly operator*(const poly& o) const { + poly res(size() + o.size() - 1); + for (int i = 0; i < size(); i++) { + for (int j = 0; j < o.size(); j++) { + res[i + j] += (data[i] * o[j]) % mod; + }} + res.trim(); + return res; + } + + //return p(x+a) + poly operator<<(ll a) const { + poly res(size()); + for (int i = size() - 1; i >= 0; i--) { + for (int j = size() - i - 1; j >= 1; j--) + res[j] = (res[j] * a + res[j - 1]) % mod; + res[0] = (res[0] * a + res[i]) % mod; + } + return res; + } + + pair divmod(const poly& d) const { + int i = size() - d.size(); + poly s(i + 1), r = *this; + ll inv = multInv(d.data.back(), mod); + for (; i >= 0; i--) { + s[i] = (r.data.back() * inv) % mod; + r.data.pop_back(); + for (int j = 0; i + j < r.size(); j++) { + r[i + j] = (r.data[i + j] - s[i] * d[j]) % mod; + }} + s.trim(); r.trim(); + return {s, r}; + } +}; diff --git a/content/math/primeSieve.cpp b/content/math/primeSieve.cpp new file mode 100644 index 0000000..1b0f514 --- /dev/null +++ b/content/math/primeSieve.cpp @@ -0,0 +1,16 @@ +constexpr ll N = 100'000'000; +bitset isNotPrime; +vector primes = {2}; + +bool isPrime(ll x) { + if (x < 2 || x % 2 == 0) return x == 2; + else return !isNotPrime[x / 2]; +} + +void primeSieve() { + for (ll i = 3; i < N; i += 2) {// i * i < N reicht für isPrime + if (!isNotPrime[i / 2]) { + primes.push_back(i); // optional + for (ll j = i * i; j < N; j+= 2 * i) { + isNotPrime[j / 2] = 1; +}}}} diff --git a/content/math/primitiveRoot.cpp b/content/math/primitiveRoot.cpp new file mode 100644 index 0000000..39a0f64 --- /dev/null +++ b/content/math/primitiveRoot.cpp @@ -0,0 +1,23 @@ +bool isPrimitive(ll g, ll n, ll phi, map& phiFacts) { + if (g == 1) return n == 2; + if (gcd(g, n) > 1) return false; + for (auto [f, _] : phiFacts) + if (powMod(g, phi / f, n) == 1) return false; + return true; +} + +bool isPrimitive(ll g, ll n) { + ll phin = phi(n); //isPrime(n) => phi(n) = n - 1 + map phiFacts; + factor(phin, phiFacts); + return isPrimitive(g, n, phin, phiFacts); +} + +ll findPrimitive(ll n) { //test auf existens geht schneller + ll phin = phi(n); //isPrime(n) => phi(n) = n - 1 + map phiFacts; + factor(phin, phiFacts); + for (ll res = 1; res < n; res++) // oder zufällige Reihenfolge + if (isPrimitive(res, n, phin, phiFacts)) return res; + return -1; +} diff --git a/content/math/rho.cpp b/content/math/rho.cpp new file mode 100644 index 0000000..ad640cd --- /dev/null +++ b/content/math/rho.cpp @@ -0,0 +1,19 @@ +using lll = __int128; +ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim. + if (n % 2 == 0) return 2; + ll x = 0, y = 0, prd = 2, i = n/2 + 7; + auto f = [&](lll c){return (c * c + i) % n;}; + for (ll t = 30; t % 40 || gcd(prd, n) == 1; t++) { + if (x == y) x = ++i, y = f(x); + if (ll q = (lll)prd * abs(x-y) % n; q) prd = q; + x = f(x); y = f(f(y)); + } + return gcd(prd, n); +} + +void factor(ll n, map& facts) { + if (n == 1) return; + if (isPrime(n)) {facts[n]++; return;} + ll f = rho(n); + factor(n / f, facts); factor(f, facts); +} diff --git a/content/math/shortModInv.cpp b/content/math/shortModInv.cpp new file mode 100644 index 0000000..f696cce --- /dev/null +++ b/content/math/shortModInv.cpp @@ -0,0 +1,3 @@ +ll multInv(ll x, ll m) { // x^{-1} mod m + return 1 < x ? 
m - multInv(m % x, x) * m / x : 1; +} diff --git a/content/math/simpson.cpp b/content/math/simpson.cpp new file mode 100644 index 0000000..7f237a4 --- /dev/null +++ b/content/math/simpson.cpp @@ -0,0 +1,12 @@ +//double f(double x) {return x;} + +double simps(double a, double b) { + return (f(a) + 4.0 * f((a + b) / 2.0) + f(b)) * (b - a) / 6.0; +} + +double integrate(double a, double b) { + double m = (a + b) / 2.0; + double l = simps(a, m), r = simps(m, b), tot = simps(a, b); + if (abs(l + r - tot) < EPS) return tot; + return integrate(a, m) + integrate(m, b); +} diff --git a/content/math/sqrtModCipolla.cpp b/content/math/sqrtModCipolla.cpp new file mode 100644 index 0000000..1fac0c5 --- /dev/null +++ b/content/math/sqrtModCipolla.cpp @@ -0,0 +1,14 @@ +ll sqrtMod(ll a, ll p) {// teste mit legendre ob lösung existiert + if (a < 2) return a; + ll t = 0; + while (legendre((t*t-4*a) % p, p) >= 0) t = rng() % p; + ll b = -t, c = -t, d = 1, m = p; + for (m++; m /= 2; b = (a+a-b*b) % p, a = (a*a) % p) { + if (m % 2) { + d = (c-d*b) % p; + c = (c*a) % p; + } else { + c = (d*a - c*b) % p; + }} + return (d + p) % p; +} diff --git a/content/math/squfof.cpp b/content/math/squfof.cpp new file mode 100644 index 0000000..1cb97de --- /dev/null +++ b/content/math/squfof.cpp @@ -0,0 +1,89 @@ +using lll = __int128; + +constexpr lll multipliers[] = {1, 3, 5, 7, + 11, 3*5, 3*7, 3*11, + 5*7, 5*11, 7*11, + 3*5*7, 3*5*11, 3*7*11, + 5*7*11, 3*5*7*11}; + +lll root(lll x) { + lll r = sqrtl(x); + while(r*r < x) r++; + while(r*r > x) r--; + return r; +} + +lll croot(lll x) { + lll r = cbrtl(x); + while(r*r*r < x) r++; + while(r*r*r > x) r--; + return r; +} + +lll squfof(lll N) { + lll s = croot(N); + if (s*s*s == N) return s; + s = root(N); + if (s*s == N) return s; + for (lll k : multipliers) { + lll D = k * N; + lll Po, P, Pprev, q, b, r, i; + Po = Pprev = P = root(D); + lll Qprev = 1; + lll Q = D - Po*Po; + lll L = 2 * root(2 * s); + lll B = 3 * L; + for (i = 2; i < B; i++) { + b = (Po + P) / Q; + P = b*Q - P; + q = Q; + Q = Qprev + b * (Pprev - P); + r = root(Q); + if (!(i & 1) && r*r == Q) break; + Qprev = q; + Pprev = P; + } + if (i >= B) continue; + b = (Po - P) / r; + Pprev = P = b*r + P; + Qprev = r; + Q = (D-Pprev*Pprev)/Qprev; + i = 0; + do { + b = (Po + P) / Q; + Pprev = P; + P = b*Q - P; + q = Q; + Q = Qprev + b * (Pprev - P); + Qprev = q; + i++; + } while(P != Pprev); + r = gcd(N, Qprev); + if (r != 1 && r != N) return r; + } + exit(1);//try fallback to pollard rho +} + +constexpr lll trialLim = 5'000; + +void factor(lll n, map& facts) { + for (lll i = 2; i * i <= n && i <= trialLim; i++) { + while (n % i == 0) { + facts[i]++; + n /= i; + }} + if (n > 1 && n < trialLim * trialLim) { + facts[n]++; + } else { + vector todo = {n}; + while (!todo.empty()) { + lll c = todo.back(); + todo.pop_back(); + if (c == 1) continue; + if (isPrime(c)) { + facts[c]++; + } else { + lll d = squfof(c); + todo.push_back(d); + todo.push_back(c / d); +}}}} diff --git a/content/math/tables.tex b/content/math/tables.tex new file mode 100644 index 0000000..53f3758 --- /dev/null +++ b/content/math/tables.tex @@ -0,0 +1,18 @@ +\enlargethispage{0.2cm} +\begin{multicols*}{2} + \input{math/tables/binom} + \vfill + \input{math/tables/composite} + \vfill + \input{math/tables/platonic} + \vfill + \input{math/tables/series} + + \columnbreak + + \input{math/tables/probability} + \vfill + \input{math/tables/stuff} + \vfill + \input{math/tables/nim} +\end{multicols*} diff --git a/content/math/tables/binom.tex b/content/math/tables/binom.tex 
new file mode 100644 index 0000000..878a6b0 --- /dev/null +++ b/content/math/tables/binom.tex @@ -0,0 +1,28 @@ +\begin{tabularx}{\linewidth}{|XXXX|} + \hline + \multicolumn{4}{|c|}{Binomialkoeffizienten} \\ + \hline + \multicolumn{4}{|c|}{ + $\frac{n!}{k!(n - k)!} \hfill=\hfill + \binom{n}{k} \hfill=\hfill + \binom{n}{n - k} \hfill=\hfill + \frac{n}{k}\binom{n - 1}{k - 1} \hfill=\hfill + \frac{n-k+1}{k}\binom{n}{k - 1} \hfill=\hfill + \binom{n - 1}{k} + \binom{n - 1}{k - 1} \hfill=\hfill + (-1)^k \binom{k - n - 1}{k} \hfill\approx\hfill + 2^{n} \cdot \frac{2}{\sqrt{2\pi n}}\cdot\exp\left(-\frac{2(x - \frac{n}{2})^2}{n}\right)$ + } \\ + \grayhline + + $\sum\limits_{k = 0}^n \binom{n}{k} = 2^n$ & + $\sum\limits_{k = 0}^n \binom{k}{m} = \binom{n + 1}{m + 1}$ & + $\sum\limits_{i = 0}^n \binom{n}{i}^2 = \binom{2n}{n}$ & + $\sum\limits_{k = 0}^n\binom{r + k}{k} = \binom{r + n + 1}{n}$\\ + + $\binom{n}{m}\binom{m}{k} = \binom{n}{k}\binom{n - k}{m - k}$ & + $\sum\limits_{k = 0}^n \binom{r}{k}\binom{s}{n - k} = \binom{r + s}{n}$ & + \multicolumn{2}{l|}{ + $\sum\limits_{i = 1}^n \binom{n}{i} F_i = F_{2n} \quad F_n = n\text{-th Fib.}$ + }\\ + \hline +\end{tabularx} diff --git a/content/math/tables/composite.tex b/content/math/tables/composite.tex new file mode 100644 index 0000000..c261db1 --- /dev/null +++ b/content/math/tables/composite.tex @@ -0,0 +1,27 @@ + +\begin{tabularx}{\linewidth}{|r||r||r|r||r|r|r||C|} + \hline + \multicolumn{8}{|c|}{Important Numbers} \\ + \hline + $10^x$ & Highly Composite & \# Divs & $<$ Prime & $>$ Prime & \# Primes & primorial & \\ + \hline + 1 & 6 & 4 & $-3$ & $+1$ & 4 & 2 & \\ + 2 & 60 & 12 & $-3$ & $+1$ & 25 & 3 & \\ + 3 & 840 & 32 & $-3$ & $+9$ & 168 & 4 & \\ + 4 & 7\,560 & 64 & $-27$ & $+7$ & 1\,229 & 5 & \\ + 5 & 83\,160 & 128 & $-9$ & $+3$ & 9\,592 & 6 & \\ + 6 & 720\,720 & 240 & $-17$ & $+3$ & 78\,498 & 7 & \\ + 7 & 8\,648\,640 & 448 & $-9$ & $+19$ & 664\,579 & 8 & \\ + 8 & 73\,513\,440 & 768 & $-11$ & $+7$ & 5\,761\,455 & 8 & \\ + 9 & 735\,134\,400 & 1\,344 & $-63$ & $+7$ & 50\,847\,534 & 9 & \\ + 10 & 6\,983\,776\,800 & 2\,304 & $-33$ & $+19$ & 455\,052\,511 & 10 & \\ + 11 & 97\,772\,875\,200 & 4\,032 & $-23$ & $+3$ & 4\,118\,054\,813 & 10 & \\ + 12 & 963\,761\,198\,400 & 6\,720 & $-11$ & $+39$ & 37\,607\,912\,018 & 11 & \\ + 13 & 9\,316\,358\,251\,200 & 10\,752 & $-29$ & $+37$ & 346\,065\,536\,839 & 12 & \\ + 14 & 97\,821\,761\,637\,600 & 17\,280 & $-27$ & $+31$ & 3\,204\,941\,750\,802 & 12 & \\ + 15 & 866\,421\,317\,361\,600 & 26\,880 & $-11$ & $+37$ & 29\,844\,570\,422\,669 & 13 & \\ + 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & $-63$ & $+61$ & 279\,238\,341\,033\,925 & 13 & \\ + 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & $-3$ & $+3$ & 2\,623\,557\,157\,654\,233 & 14 & \\ + 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & $-11$ & $+3$ & 24\,739\,954\,287\,740\,860 & 16 & \\ + \hline +\end{tabularx} diff --git a/content/math/tables/nim.tex b/content/math/tables/nim.tex new file mode 100644 index 0000000..8490d42 --- /dev/null +++ b/content/math/tables/nim.tex @@ -0,0 +1,96 @@ +\begin{tabularx}{\linewidth}{|p{0.37\linewidth}|X|} + \hline + \multicolumn{2}{|c|}{Nim-Spiele (\ding{182} letzter gewinnt (normal), \ding{183} letzter verliert)} \\ + \hline + Beschreibung & + Strategie \\ + \hline + + $M = [\mathit{pile}_i]$\newline + $[x] := \{1, \ldots, x\}$& + $\mathit{SG} = \oplus_{i = 1}^n \mathit{pile}_i$\newline + \ding{182} Nimm von einem Stapel, sodass $\mathit{SG}$ $0$ wird.\newline + \ding{183} Genauso. 
+ Außer: Bleiben nur noch Stapel der Größe $1$, erzeuge ungerade Anzahl solcher Stapel.\\ + \hline + + $M = \{a^m \mid m \geq 0\}$ & + $a$ ungerade: $\mathit{SG}_n = n \% 2$\newline + $a$ gerade:\newline + $\mathit{SG}_n = 2$, falls $n \equiv a \bmod (a + 1) $\newline + $\mathit{SG}_n = n \% (a + 1) \% 2$, sonst.\\ + \hline + + $M_{\text{\ding{172}}} = \left[\frac{\mathit{pile}_i}{2}\right]$\newline + $M_{\text{\ding{173}}} = + \left\{\left\lceil\frac{\mathit{pile}_i}{2}\right\rceil,~ + \mathit{pile}_i\right\}$ & + \ding{172} + $\mathit{SG}_{2n} = n$, + $\mathit{SG}_{2n+1} = \mathit{SG}_n$\newline + \ding{173} + $\mathit{SG}_0 = 0$, + $\mathit{SG}_n = [\log_2 n] + 1$ \\ + \hline + + $M_{\text{\ding{172}}} = \text{Teiler von $\mathit{pile}_i$}$\newline + $M_{\text{\ding{173}}} = \text{echte Teiler von $\mathit{pile}_i$}$ & + \ding{172} + $\mathit{SG}_0 = 0$, + $\mathit{SG}_n = \mathit{SG}_{\text{\ding{173},n}} + 1$\newline + \ding{173} + $\mathit{ST}_1 = 0$, + $\mathit{SG}_n = \text{\#Nullen am Ende von $n_{bin}$}$\\ + \hline + + $M_{\text{\ding{172}}} = [k]$\newline + $M_{\text{\ding{173}}} = S$, ($S$ endlich)\newline + $M_{\text{\ding{174}}} = S \cup \{\mathit{pile}_i\}$ & + $\mathit{SG}_{\text{\ding{172}}, n} = n \bmod (k + 1)$\newline + \ding{182} Niederlage bei $\mathit{SG} = 0$\newline + \ding{183} Niederlage bei $\mathit{SG} = 1$\newline + $\mathit{SG}_{\text{\ding{174}}, n} = \mathit{SG}_{\text{\ding{173}}, n} + 1$\\ + \hline + + \multicolumn{2}{|l|}{ + Für jedes endliche $M$ ist $\mathit{SG}$ eines Stapels irgendwann periodisch. + } \\ + \hline + + \textsc{Moore}'s Nim:\newline + Beliebige Zahl von maximal $k$ Stapeln. & + \ding{182} + Schreibe $\mathit{pile}_i$ binär. + Addiere ohne Übertrag zur Basis $k + 1$. + Niederlage, falls Ergebnis gleich 0.\newline + \ding{183} + Wenn alle Stapel $1$ sind: + Niederlage, wenn $n \equiv 1 \bmod (k + 1)$. + Sonst wie in \ding{182}.\\ + \hline + + Staircase Nim:\newline + $n$ Stapel in einer Reihe. + Beliebige Zahl von Stapel $i$ nach Stapel $i-1$. 
& + Niederlage, wenn Nim der ungeraden Spiele verloren ist:\newline + $\oplus_{i = 0}^{(n - 1) / 2} \mathit{pile}_{2i + 1} = 0$\\ + \hline + + \textsc{Lasker}'s Nim:\newline + Zwei mögliche Züge:\newline + 1) Nehme beliebige Zahl.\newline + 2) Teile Stapel in zwei Stapel (ohne Entnahme).& + $\mathit{SG}_n = n$, falls $n \equiv 1,2 \bmod 4$\newline + $\mathit{SG}_n = n + 1$, falls $n \equiv 3 \bmod 4$\newline + $\mathit{SG}_n = n - 1$, falls $n \equiv 0 \bmod 4$\\ + \hline + + \textsc{Kayles}' Nim:\newline + Zwei mögliche Züge:\newline + 1) Nehme beliebige Zahl.\newline + 2) Teile Stapel in zwei Stapel (mit Entnahme).& + Berechne $\mathit{SG}_n$ für kleine $n$ rekursiv.\newline + $n \in [72,83]: \quad 4, 1, 2, 8, 1, 4, 7, 2, 1, 8, 2, 7$\newline + Periode ab $n = 72$ der Länge $12$.\\ + \hline +\end{tabularx} diff --git a/content/math/tables/numbers.tex b/content/math/tables/numbers.tex new file mode 100644 index 0000000..1dc9f38 --- /dev/null +++ b/content/math/tables/numbers.tex @@ -0,0 +1,59 @@ +\begin{expandtable} +\begin{tabularx}{\linewidth}{|l|X|} + \hline + \multicolumn{2}{|c|}{Berühmte Zahlen} \\ + \hline + \textsc{Fibonacci} & + $f(0) = 0 \quad + f(1) = 1 \quad + f(n+2) = f(n+1) + f(n)$ \\ + \grayhline + + \textsc{Catalan} & + $C_0 = 1 \qquad + C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} = + \frac{1}{n + 1}\binom{2n}{n} = \frac{2(2n - 1)}{n+1} \cdot C_{n-1}$ \\ + \grayhline + + \textsc{Euler} I & + $\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \qquad + \eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1} $ \\ + \grayhline + + \textsc{Euler} II & + $\eulerII{n}{0} = 1 \quad + \eulerII{n}{n} = 0 \quad$\\ + & $\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}$ \\ + \grayhline + + \textsc{Stirling} I & + $\stirlingI{0}{0} = 1 \qquad + \stirlingI{n}{0} = \stirlingI{0}{n} = 0 \qquad + \stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}$ \\ + \grayhline + + \textsc{Stirling} II & + $\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad + \stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} = + \frac{1}{k!} \sum\limits_{j=0}^{k} (-1)^{k-j}\binom{k}{j}j^n$\\ + \grayhline + + \textsc{Bell} & + $B_1 = 1 \qquad + B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k} + = \sum\limits_{k = 0}^{n}\stirlingII{n}{k}$\\ + \grayhline + + \textsc{Partitions} & + $p(0,0) = 1 \quad + p(n,k) = 0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0$ \\ + & $p(n,k) = p(n-k,k) + p(n-1,k-1)$\\ + \grayhline + + \textsc{Partitions} & + $f(0) = 1 \quad f(n) = 0~(n < 0)$ \\ + & $f(n)=\sum\limits_{k=1}^\infty(-1)^{k-1}f(n - \frac{k(3k+1)}{2})+\sum\limits_{k=1}^\infty(-1)^{k-1}f(n - \frac{k(3k-1)}{2})$\\ + + \hline +\end{tabularx} +\end{expandtable} diff --git a/content/math/tables/platonic.tex b/content/math/tables/platonic.tex new file mode 100644 index 0000000..f4ee554 --- /dev/null +++ b/content/math/tables/platonic.tex @@ -0,0 +1,39 @@ +\begin{tabularx}{\linewidth}{|X|CCCX|} + \hline + \multicolumn{5}{|c|}{Platonische Körper} \\ + \hline + Übersicht & Seiten & Ecken & Kanten & dual zu \\ + \hline + Tetraeder & 4 & 4 & 6 & Tetraeder \\ + Würfel/Hexaeder & 6 & 8 & 12 & Oktaeder \\ + Oktaeder & 8 & 6 & 12 & Würfel/Hexaeder\\ + Dodekaeder & 12 & 20 & 30 & Ikosaeder \\ + Ikosaeder & 20 & 12 & 30 & Dodekaeder \\ + \hline + \multicolumn{5}{|c|}{Färbungen mit maximal $n$ Farben (bis auf Isomorphie)} \\ + \hline + \multicolumn{3}{|l}{Ecken vom Oktaeder/Seiten vom Würfel} & + \multicolumn{2}{l|}{$(n^6 + 3n^4 + 12n^3 + 8n^2)/24$} \\ + + 
\multicolumn{3}{|l}{Ecken vom Würfel/Seiten vom Oktaeder} & + \multicolumn{2}{l|}{$(n^8 + 17n^4 + 6n^2)/24$} \\ + + \multicolumn{3}{|l}{Kanten vom Würfel/Oktaeder} & + \multicolumn{2}{l|}{$(n^{12} + 6n^7 + 3n^6 + 8n^4 + 6n^3)/24$} \\ + + \multicolumn{3}{|l}{Ecken/Seiten vom Tetraeder} & + \multicolumn{2}{l|}{$(n^4 + 11n^2)/12$} \\ + + \multicolumn{3}{|l}{Kanten vom Tetraeder} & + \multicolumn{2}{l|}{$(n^6 + 3n^4 + 8n^2)/12$} \\ + + \multicolumn{3}{|l}{Ecken vom Ikosaeder/Seiten vom Dodekaeder} & + \multicolumn{2}{l|}{$(n^{12} + 15n^6 + 44n^4)/60$} \\ + + \multicolumn{3}{|l}{Ecken vom Dodekaeder/Seiten vom Ikosaeder} & + \multicolumn{2}{l|}{$(n^{20} + 15n^{10} + 20n^8 + 24n^4)/60$} \\ + + \multicolumn{3}{|l}{Kanten vom Dodekaeder/Ikosaeder (evtl. falsch)} & + \multicolumn{2}{l|}{$(n^{30} + 15n^{16} + 20n^{10} + 24n^6)/60$} \\ + \hline +\end{tabularx} diff --git a/content/math/tables/probability.tex b/content/math/tables/probability.tex new file mode 100644 index 0000000..f265d10 --- /dev/null +++ b/content/math/tables/probability.tex @@ -0,0 +1,27 @@ +\begin{tabularx}{\linewidth}{|LICIR|} + \hline + \multicolumn{3}{|c|}{ + Wahrscheinlichkeitstheorie ($A,B$ Ereignisse und $X,Y$ Variablen) + } \\ + \hline + $\E(X + Y) = \E(X) + \E(Y)$ & + $\E(\alpha X) = \alpha \E(X)$ & + $X, Y$ unabh. $\Leftrightarrow \E(XY) = \E(X) \cdot \E(Y)$\\ + + $\Pr[A \vert B] = \frac{\Pr[A \land B]}{\Pr[B]}$ & + $A, B$ disj. $\Leftrightarrow \Pr[A \land B] = \Pr[A] \cdot \Pr[B]$ & + $\Pr[A \lor B] = \Pr[A] + \Pr[B] - \Pr[A \land B]$ \\ + \hline +\end{tabularx} +\vfill +\begin{tabularx}{\linewidth}{|Xlr|lrX|} + \hline + \multicolumn{6}{|c|}{\textsc{Bertrand}'s Ballot Theorem (Kandidaten $A$ und $B$, $k \in \mathbb{N}$)} \\ + \hline + & $\#A > k\#B$ & $Pr = \frac{a - kb}{a + b}$ & + $\#B - \#A \leq k$ & $Pr = 1 - \frac{a!b!}{(a + k + 1)!(b - k - 1)!}$ & \\ + + & $\#A \geq k\#B$ & $Pr = \frac{a + 1 - kb}{a + 1}$ & + $\#A \geq \#B + k$ & $Num = \frac{a - k + 1 - b}{a - k + 1} \binom{a + b - k}{b}$ & \\ + \hline +\end{tabularx} diff --git a/content/math/tables/series.tex b/content/math/tables/series.tex new file mode 100644 index 0000000..3042781 --- /dev/null +++ b/content/math/tables/series.tex @@ -0,0 +1,33 @@ +\begin{tabularx}{\linewidth}{|XIXIXIX|} + \hline + \multicolumn{4}{|c|}{Reihen} \\ + \hline + $\sum\limits_{i = 1}^n i = \frac{n(n+1)}{2}$ & + $\sum\limits_{i = 1}^n i^2 = \frac{n(n + 1)(2n + 1)}{6}$ & + $\sum\limits_{i = 1}^n i^3 = \frac{n^2 (n + 1)^2}{4}$ & + $H_n = \sum\limits_{i = 1}^n \frac{1}{i}$ \\ + \grayhline + + $\sum\limits_{i = 0}^n c^i = \frac{c^{n + 1} - 1}{c - 1} \quad c \neq 1$ & + $\sum\limits_{i = 0}^\infty c^i = \frac{1}{1 - c} \quad \vert c \vert < 1$ & + $\sum\limits_{i = 1}^\infty c^i = \frac{c}{1 - c} \quad \vert c \vert < 1$ & + $\sum\limits_{i = 0}^\infty ic^i = \frac{c}{(1 - c)^2} \quad \vert c \vert < 1$ \\ + \grayhline + + \multicolumn{2}{|lI}{ + $\sum\limits_{i = 0}^n ic^i = \frac{nc^{n + 2} - (n + 1)c^{n + 1} + c}{(c - 1)^2} \quad c \neq 1$ + } & + \multicolumn{2}{l|}{ + $\sum\limits_{i = 1}^n iH_i = \frac{n(n + 1)}{2}H_n - \frac{n(n - 1)}{4}$ + } \\ + \grayhline + + \multicolumn{2}{|lI}{ + $\sum\limits_{i = 1}^n H_i = (n + 1)H_n - n$ + } & + \multicolumn{2}{l|}{ + $\sum\limits_{i = 1}^n \binom{i}{m}H_i = + \binom{n + 1}{m + 1} \left(H_{n + 1} - \frac{1}{m + 1}\right)$ + } \\ + \hline +\end{tabularx} diff --git a/content/math/tables/stuff.tex b/content/math/tables/stuff.tex new file mode 100644 index 0000000..3cf8b4c --- /dev/null +++ b/content/math/tables/stuff.tex @@ -0,0 +1,32 @@ 
+\begin{tabularx}{\linewidth}{|ll|} + \hline + \multicolumn{2}{|C|}{Verschiedenes} \\ + \hline + Türme von Hanoi, minimale Schirttzahl: & + $T_n = 2^n - 1$ \\ + + \#Regionen zwischen $n$ Geraden & + $\frac{n\left(n + 1\right)}{2} + 1$ \\ + + \#abgeschlossene Regionen zwischen $n$ Geraden & + $\frac{n^2 - 3n + 2}{2}$ \\ + + \#markierte, gewurzelte Bäume & + $n^{n-1}$ \\ + + \#markierte, nicht gewurzelte Bäume & + $n^{n-2}$ \\ + + \#Wälder mit $k$ gewurzelten Bäumen & + $\frac{k}{n}\binom{n}{k}n^{n-k}$ \\ + + \#Wälder mit $k$ gewurzelten Bäumen mit vorgegebenen Wurzelknoten& + $\frac{k}{n}n^{n-k}$ \\ + + Derangements & + $!n = (n - 1)(!(n - 1) + !(n - 2)) = \left\lfloor\frac{n!}{e} + \frac{1}{2}\right\rfloor$ \\ + & + $\lim\limits_{n \to \infty} \frac{!n}{n!} = \frac{1}{e}$ \\ + \hline +\end{tabularx} + diff --git a/content/math/tables/twelvefold.tex b/content/math/tables/twelvefold.tex new file mode 100644 index 0000000..18d3955 --- /dev/null +++ b/content/math/tables/twelvefold.tex @@ -0,0 +1,32 @@ +\begin{expandtable} +\begin{tabularx}{\linewidth}{|C|CICICIC|} + \hline + Bälle & identisch & verschieden & identisch & verschieden \\ + Boxen & identisch & identisch & verschieden & verschieden \\ + \hline + -- & + $p_k(n + k)$ & + $\sum\limits_{i = 0}^k \stirlingII{n}{i}$ & + $\binom{n + k - 1}{k - 1}$ & + $k^n$ \\ + \grayhline + + \makecell{Bälle pro\\Box $\geq 1$} & + $p_k(n)$ & + $\stirlingII{n}{k}$ & + $\binom{n - 1}{k - 1}$ & + $k! \stirlingII{n}{k}$ \\ + \grayhline + + \makecell{Bälle pro\\Box $\leq 1$} & + $[n \leq k]$ & + $[n \leq k]$ & + $\binom{k}{n}$ & + $n! \binom{k}{n}$ \\ + \hline + \multicolumn{5}{|l|}{ + $[\text{Bedingung}]$: \code{return Bedingung ? 1 : 0;} + } \\ + \hline +\end{tabularx} +\end{expandtable} diff --git a/content/math/transforms/andTransform.cpp b/content/math/transforms/andTransform.cpp new file mode 100644 index 0000000..1fd9f5c --- /dev/null +++ b/content/math/transforms/andTransform.cpp @@ -0,0 +1,8 @@ +void fft(vector& a, bool inv = false) { + int n = sz(a); + for (int s = 1; s < n; s *= 2) { + for (int i = 0; i < n; i += 2 * s) { + for (int j = i; j < i + s; j++) { + ll& u = a[j], &v = a[j + s]; + tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); +}}}} diff --git a/content/math/transforms/bitwiseTransforms.cpp b/content/math/transforms/bitwiseTransforms.cpp new file mode 100644 index 0000000..28561da --- /dev/null +++ b/content/math/transforms/bitwiseTransforms.cpp @@ -0,0 +1,12 @@ +void bitwiseConv(vector& a, bool inv = false) { + int n = sz(a); + for (int s = 1; s < n; s *= 2) { + for (int i = 0; i < n; i += 2 * s) { + for (int j = i; j < i + s; j++) { + ll& u = a[j], &v = a[j + s]; + tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); // AND + //tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u); //OR + //tie(u, v) = pair(u + v, u - v); // XOR + }}} + //if (inv) for (ll& x : a) x /= n; // XOR (careful with MOD) +} diff --git a/content/math/transforms/fft.cpp b/content/math/transforms/fft.cpp new file mode 100644 index 0000000..2bd95b2 --- /dev/null +++ b/content/math/transforms/fft.cpp @@ -0,0 +1,23 @@ +using cplx = complex; + +void fft(vector& a, bool inv = false) { + int n = sz(a); + for (int i = 0, j = 1; j < n - 1; ++j) { + for (int k = n >> 1; k > (i ^= k); k >>= 1); + if (j < i) swap(a[i], a[j]); + } + static vector ws(2, 1); + for (static int k = 2; k < n; k *= 2) { + ws.resize(n); + cplx w = polar(1.0, acos(-1.0) / k); + for (int i=k; i<2*k; i++) ws[i] = ws[i/2] * (i % 2 ? 
w : 1); + } + for (int s = 1; s < n; s *= 2) { + for (int j = 0; j < n; j += 2 * s) { + for (int k = 0; k < s; k++) { + cplx u = a[j + k], t = a[j + s + k]; + t *= (inv ? conj(ws[s + k]) : ws[s + k]); + a[j + k] = u + t; + a[j + s + k] = u - t; + if (inv) a[j + k] /= 2, a[j + s + k] /= 2; +}}}} diff --git a/content/math/transforms/fftMul.cpp b/content/math/transforms/fftMul.cpp new file mode 100644 index 0000000..660ed79 --- /dev/null +++ b/content/math/transforms/fftMul.cpp @@ -0,0 +1,15 @@ +vector mul(vector& a, vector& b) { + int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); + vector c(all(a)), d(n); + c.resize(n); + for (int i = 0; i < sz(b); i++) c[i] = {real(c[i]), b[i]}; + fft(c); + for (int i = 0; i < n; i++) { + int j = (n - i) & (n - 1); + cplx x = (c[i] + conj(c[j])) / cplx{2, 0}; //fft(a)[i]; + cplx y = (c[i] - conj(c[j])) / cplx{0, 2}; //fft(b)[i]; + d[i] = x * y; + } + fft(d, true); + return d; +} diff --git a/content/math/transforms/multiplyBitwise.cpp b/content/math/transforms/multiplyBitwise.cpp new file mode 100644 index 0000000..f7cf169 --- /dev/null +++ b/content/math/transforms/multiplyBitwise.cpp @@ -0,0 +1,8 @@ +vector mul(vector a, vector b) { + int n = 1 << (__lg(2 * max(sz(a), sz(b)) - 1)); + a.resize(n), b.resize(n); + bitwiseConv(a), bitwiseConv(b); + for (int i=0; i mul(vector& a, vector& b) { + int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); + vector a2(all(a)), b2(all(b)); + a2.resize(n), b2.resize(n); + fft(a2), fft(b2); + for (int i=0; i ans(n); + for (int i=0; i mul(vector a, vector b) { + int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); + a.resize(n), b.resize(n); + ntt(a), ntt(b); + for (int i=0; i& a, bool inv = false) { + int n = sz(a); + auto b = a; + ll r = inv ? powMod(root, mod - 2, mod) : root; + + for (int s = n / 2; s > 0; s /= 2) { + ll ws = powMod(r, (mod - 1) / (n / s), mod), w = 1; + for (int j = 0; j < n / 2; j += s) { + for (int k = j; k < j + s; k++) { + ll u = a[j + k], t = a[j + s + k] * w % mod; + b[k] = (u + t) % mod; + b[n/2 + k] = (u - t + mod) % mod; + } + w = w * ws % mod; + } + swap(a, b); + } + if (inv) { + ll div = powMod(n, mod - 2, mod); + for (auto& x : a) x = x * div % mod; +}} diff --git a/content/math/transforms/orTransform.cpp b/content/math/transforms/orTransform.cpp new file mode 100644 index 0000000..eb1da44 --- /dev/null +++ b/content/math/transforms/orTransform.cpp @@ -0,0 +1,8 @@ +void fft(vector& a, bool inv = false) { + int n = sz(a); + for (int s = 1; s < n; s *= 2) { + for (int i = 0; i < n; i += 2 * s) { + for (int j = i; j < i + s; j++) { + ll& u = a[j], &v = a[j + s]; + tie(u, v) = inv ? 
pair(v, u - v) : pair(u + v, u); +}}}} diff --git a/content/math/transforms/seriesOperations.cpp b/content/math/transforms/seriesOperations.cpp new file mode 100644 index 0000000..4743674 --- /dev/null +++ b/content/math/transforms/seriesOperations.cpp @@ -0,0 +1,56 @@ +vector poly_inv(const vector& a, int n) { + vector q = {powMod(a[0], mod-2, mod)}; + for (int len = 1; len < n; len *= 2){ + vector a2 = a, q2 = q; + a2.resize(2*len), q2.resize(2*len); + ntt(q2); + for (int j : {0, 1}) { + ntt(a2); + for (int i = 0; i < 2*len; i++) a2[i] = a2[i]*q2[i] % mod; + ntt(a2, true); + for (int i = 0; i < len; i++) a2[i] = 0; + } + for (int i = len; i < min(n, 2*len); i++) { + q.push_back((mod - a2[i]) % mod); + }} + return q; +} + +vector poly_deriv(vector a) { + for (int i = 1; i < sz(a); i++) + a[i-1] = a[i] * i % mod; + a.pop_back(); + return a; +} + +vector poly_integr(vector a) { + if (a.empty()) return {0}; + a.push_back(a.back() * powMod(sz(a), mod-2, mod) % mod); + for (int i = sz(a)-2; i > 0; i--) + a[i] = a[i-1] * powMod(i, mod-2, mod) % mod; + a[0] = 0; + return a; +} + +vector poly_log(vector a, int n) { + a = mul(poly_deriv(a), poly_inv(a, n)); + a.resize(n-1); + a = poly_integr(a); + return a; +} + +vector poly_exp(vector a, int n) { + vector q = {1}; + for (int len = 1; len < n; len *= 2) { + vector p = poly_log(q, 2*len); + for (int i = 0; i < 2*len; i++) + p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod; + vector q2 = q; + q2.resize(2*len); + ntt(p), ntt(q2); + for (int i = 0; i < 2*len; i++) p[i] = p[i] * q2[i] % mod; + ntt(p, true); + for (int i = len; i < min(n, 2*len); i++) q.push_back(p[i]); + } + return q; +} diff --git a/content/math/transforms/xorTransform.cpp b/content/math/transforms/xorTransform.cpp new file mode 100644 index 0000000..f9d1d82 --- /dev/null +++ b/content/math/transforms/xorTransform.cpp @@ -0,0 +1,10 @@ +void fft(vector& a, bool inv = false) { + int n = sz(a); + for (int s = 1; s < n; s *= 2) { + for (int i = 0; i < n; i += 2 * s) { + for (int j = i; j < i + s; j++) { + ll& u = a[j], &v = a[j + s]; + tie(u, v) = pair(u + v, u - v); + }}} + if (inv) for (ll& x : a) x /= n; +} diff --git a/content/other/bitOps.cpp b/content/other/bitOps.cpp new file mode 100644 index 0000000..8079305 --- /dev/null +++ b/content/other/bitOps.cpp @@ -0,0 +1,18 @@ +// Iteriert über alle Teilmengen einer Bitmaske +// (außer der leeren Menge). +for (int subset = bitmask; subset > 0; + subset = (subset - 1) & bitmask) + +// Zählt Anzahl der gesetzten Bits. +int numberOfSetBits(int i) { + i = i - ((i >> 1) & 0x5555'5555); + i = (i & 0x3333'3333) + ((i >> 2) & 0x3333'3333); + return (((i + (i >> 4)) & 0x0F0F'0F0F) * 0x0101'0101) >> 24; +} + +// Nächste Permutation in Bitmaske +// (z.B. 00111 => 01011 => 01101 => ...) 
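+// Useful for enumerating all k-element subsets of {0,...,n-1}:
+// start with v = (1 << k) - 1 and iterate while v < (1 << n).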
+ll nextPerm(ll v) { + ll t = v | (v - 1); + return (t+1) | (((~t & -~t) - 1) >> (__builtin_ctzll(v) + 1)); +} diff --git a/content/other/compiletime.cpp b/content/other/compiletime.cpp new file mode 100644 index 0000000..b71f83b --- /dev/null +++ b/content/other/compiletime.cpp @@ -0,0 +1,7 @@ +template +struct Table { + int data[N]; + constexpr Table() : data {} { + for (int i = 0; i < N; i++) data[i] = i; +}}; +constexpr Table<100'000> precalculated; diff --git a/content/other/divideAndConquer.cpp b/content/other/divideAndConquer.cpp new file mode 100644 index 0000000..830dc7f --- /dev/null +++ b/content/other/divideAndConquer.cpp @@ -0,0 +1,27 @@ +vector> dp; +vector> C; + +void rec(int i, int j0, int j1, int m0, int m1) { + if (j1 < j0) return; + int jmid = (j0 + j1) / 2; + + dp[i][jmid] = inf; + int bestk = m0; + for (int k = m0; k < min(jmid, m1 + 1); ++k) { + if (dp[i - 1][k] + C[k + 1][jmid] < dp[i][jmid]) { + dp[i][jmid] = dp[i - 1][k] + C[k + 1][jmid]; + bestk = k; + }} + + rec(i, j0, jmid - 1, m0, bestk); + rec(i, jmid + 1, j1, bestk, m1); +} + +ll calc(int n, int m) { + dp = vector>(m, vector(n, inf)); + for (int i = 0; i < n; i++) dp[0][i] = C[0][i]; + for (int i = 1; i < m; i++) { + rec(i, 0, n - 1, 0, n - 1); + } + return dp[m - 1][n - 1]; +} diff --git a/content/other/fastIO.cpp b/content/other/fastIO.cpp new file mode 100644 index 0000000..9badcc7 --- /dev/null +++ b/content/other/fastIO.cpp @@ -0,0 +1,24 @@ +void fastscan(int& number) { + bool negative = false; + int c; + number = 0; + c = getchar(); + while(c != '-' && (c < '0' || c > '9')) c = getchar(); + if (c == '-') negative = true, c = getchar(); + for (; c >= '0' && c <= '9'; c = getchar()) number = number * 10 + c - '0'; + if (negative) number *= -1; +} + +void printPositive(int n) { + if (n == 0) return; + printPositive(n / 10); + putchar(n % 10 + '0'); +} + +void fastprint(int n) { + if(n == 0) {putchar('0'); return;} + if (n < 0) { + putchar('-'); + printPositive(-n); + } else printPositive(n); +} diff --git a/content/other/josephus2.cpp b/content/other/josephus2.cpp new file mode 100644 index 0000000..5086e13 --- /dev/null +++ b/content/other/josephus2.cpp @@ -0,0 +1,8 @@ +int rotateLeft(int n) { // Der letzte Überlebende, 1-basiert. + for (int i = 31; i >= 0; i--) { + if (n & (1 << i)) { + n &= ~(1 << i); + break; + }} + n <<= 1; n++; return n; +} diff --git a/content/other/josephusK.cpp b/content/other/josephusK.cpp new file mode 100644 index 0000000..5025f89 --- /dev/null +++ b/content/other/josephusK.cpp @@ -0,0 +1,5 @@ +// Der letzte Überlebende, 0-basiert. +int josephus(int n, int k) { + if (n == 1) return 0; + return (josephus(n - 1, k) + k) % n; +} diff --git a/content/other/knuth.cpp b/content/other/knuth.cpp new file mode 100644 index 0000000..1d513c8 --- /dev/null +++ b/content/other/knuth.cpp @@ -0,0 +1,15 @@ +ll calc(int n, int m, const vector>& C) { + vector> dp(m, vector(n, inf)); + vector> opt(m, vector(n + 1, n - 1)); + + for (int i = 0; i < n; i++) dp[0][i] = C[0][i]; + for (int i = 1; i < m; i++) { + for (int j = n - 1; j >= 0; --j) { + opt[i][j] = i == 1 ? 
0 : opt[i - 1][j]; + for (int k = opt[i][j]; k <= min(opt[i][j+1], j-1); k++) { + if (dp[i][j] <= dp[i - 1][k] + C[k + 1][j]) continue; + dp[i][j] = dp[i - 1][k] + C[k + 1][j]; + opt[i][j] = k; + }}} + return dp[m - 1][n - 1]; +} diff --git a/content/other/other.tex b/content/other/other.tex new file mode 100644 index 0000000..b47893f --- /dev/null +++ b/content/other/other.tex @@ -0,0 +1,312 @@ +\section{Sonstiges} + +\begin{algorithm}{Compiletime} + \begin{itemize} + \item überprüfen ob Compilezeit Berechnungen erlaubt sind! + \item braucht \code{c++14} oder höher! + \end{itemize} + \sourcecode{other/compiletime.cpp} +\end{algorithm} + +\begin{algorithm}{Timed} + Kann benutzt werden um randomisierte Algorithmen so lange wie möglich laufen zu lassen. + \sourcecode{other/timed.cpp} +\end{algorithm} + +\begin{algorithm}{Bit Operations} + \begin{expandtable} + \begin{tabularx}{\linewidth}{|Ll|} + \hline + Bit an Position j lesen & \code{(x & (1 << j)) != 0} \\ + Bit an Position j setzten & \code{x |= (1 << j)} \\ + Bit an Position j löschen & \code{x &= ~(1 << j)} \\ + Bit an Position j flippen & \code{x ^= (1 << j)} \\ + Anzahl an führenden nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\ + Anzahl an schließenden nullen ($x \neq 0$) & \code{__builtin_ctzll(x)} \\ + Anzahl an \code{1} bits & \code{__builtin_popcountll(x)} \\ + $i$-te Zahl eines Graycodes & \code{i ^ (i >> 1)} \\ + \hline + \end{tabularx}\\ + \end{expandtable} + \sourcecode{other/bitOps.cpp} +\end{algorithm} + +\begin{algorithm}{Overflow-sichere arithmetische Operationen} + Gibt zurück, ob es einen Overflow gab. Wenn nicht, enthält \code{c} das Ergebnis. + \begin{expandtable} + \begin{tabularx}{\linewidth}{|lR|} + \hline + Addition & \code{__builtin_saddll_overflow(a, b, &c)} \\ + Subtraktion & \code{__builtin_ssubll_overflow(a, b, &c)} \\ + Multiplikation & \code{__builtin_smulll_overflow(a, b, &c)} \\ + \hline + \end{tabularx} + \end{expandtable} +\end{algorithm} + +\begin{algorithm}{Pragmas} + \sourcecode{other/pragmas.cpp} +\end{algorithm} + +\begin{algorithm}{DP Optimizations} + Aufgabe: Partitioniere Array in genau $m$ zusammenhängende Teile mit minimalen Kosten: + $dp[i][j] = \min_{kn>0,~k>0$ und $m\not\equiv n \bmod 2$ dann beschreibt diese Formel alle Pythagoreischen Tripel eindeutig: + \[k~\cdot~\Big(~a=m^2-n^2,\quad b=2mn,\quad c=m^2+n^2~\Big)\] + + \item \textbf{Centroids of a Tree:} + Ein \emph{Centroid} ist ein Knoten, der einen Baum in Komponenten der maximalen Größe $\frac{\abs{V}}{2}$ splitted. + Es kann $2$ Centroids geben! + + \item \textbf{Centroid Decomposition:} + Wähle zufälligen Knoten und mache DFS. + Verschiebe ausgewählten Knoten in Richtung des tiefsten Teilbaums, bis Centroid gefunden. Entferne Knoten, mache rekursiv in Teilbäumen weiter. Laufzeit:~\runtime{\abs{V} \cdot \log(\abs{V})}. + \item \textbf{Gregorian Calendar:} Der Anfangstag des Jahres ist alle $400$ Jahre gleich. + + \item \textbf{Pivotsuche und Rekursion auf linkem und rechtem Teilarray:} + Suche gleichzeitig von links und rechts nach Pivot, um Worst Case von + $\runtime{n^2}$ zu $\runtime{n\log n}$ zu verbessern. + + \item \textbf{\textsc{Mo}'s Algorithm:} + SQRT-Decomposition auf $n$ Intervall Queries $[l,r]$. + Gruppiere Queries in $\sqrt{n}$ Blöcke nach linker Grenze $l$. + Sortiere nach Block und bei gleichem Block nach rechter Grenze $r$. + Beantworte Queries offline durch schrittweise Vergrößern/Verkleinern des aktuellen Intervalls. + Laufzeit:~\runtime{n\cdot\sqrt{n}}. + (Anzahl der Blöcke als Konstante in Code schreiben.) 
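A minimal sketch of Mo's algorithm as described in the item above (an illustration, not part of the original patch). It assumes problem-specific add/del/answer routines that maintain the current window, the ll/all/sz shorthands from the template, and half-open query intervals [l, r); BLOCK should be tuned to roughly sqrt(n):

constexpr int BLOCK = 350;                    // ~sqrt(n), hardcoded as advised
struct Query { int l, r, id; };               // half-open interval [l, r)
void add(int i); void del(int i); ll answer();// problem specific (assumed)

vector<ll> mo(vector<Query> qs) {
    sort(all(qs), [](const Query& a, const Query& b) {
        if (a.l / BLOCK != b.l / BLOCK) return a.l < b.l;
        return ((a.l / BLOCK) & 1) ? a.r > b.r : a.r < b.r; // odd-even trick
    });
    vector<ll> res(sz(qs));
    int l = 0, r = 0;                         // current window [l, r)
    for (auto& [ql, qr, id] : qs) {
        while (l > ql) add(--l);              // grow first, then shrink
        while (r < qr) add(r++);
        while (l < ql) del(l++);
        while (r > qr) del(--r);
        res[id] = answer();
    }
    return res;
}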
+ + \item \textbf{SQRT Techniques:} + \begin{itemize} + \item Aufteilen in \emph{leichte} (wert $\leq\sqrt{x}$) und \emph{schwere} (höchsten $\sqrt{x}$ viele) Objekte. + \item Datenstruktur in Blöcke fester Größe (z.b. 256 oder 512) aufteilen. + \item Datenstruktur nach fester Anzahl Updates komplett neu bauen. + \item Wenn die Summe über $x_i$ durch $X$ beschränkt ist, dann gibt es nur $\sqrt{2X}$ verschiedene Werte von $x_i$ (z.b. Längen von Strings). + \item Wenn $w\cdot h$ durch $X$ beschränkt ist, dann ist $\min(w,h)\leq\sqrt{X}$. + \end{itemize} + + \item \textbf{Partition:} + Gegeben Gewichte $w_0+w_1+\cdots+w_k=W$, existiert eine Teilmenge mit Gewicht $x$? + Drei gleiche Gewichte $w$ können zu $w$ und $2w$ kombiniert werden ohne die Lösung zu ändern $\Rightarrow$ nur $2\sqrt{W}$ unterschiedliche Gewichte. + Mit bitsets daher selbst für $10^5$ lösbar. +\end{itemize} + +\subsection{Tipps \& Tricks} + +\begin{itemize} + \item \textbf{Run Time Error:} + \begin{itemize} + \item Stack Overflow? Evtl. rekursive Tiefensuche auf langem Pfad? + \item Array-Grenzen überprüfen. Indizierung bei $0$ oder bei $1$ beginnen? + \item Abbruchbedingung bei Rekursion? + \item Evtl. Memory Limit Exceeded? Mit \code{/usr/bin/time -v} erhält man den maximalen Speicherverbrauch bei der Ausführung (Maximum resident set size). + \end{itemize} + + \item \textbf{Strings:} + \begin{itemize} + \item Soll \codeSafe{"aa"} kleiner als \codeSafe{"z"} sein oder nicht? + \item bit \code{0x20} beeinflusst Groß-/Kleinschreibung. + \end{itemize} + + \item \textbf{Zeilenbasierte Eingabe}: + \begin{itemize} + \item \code{getline(cin, str)} liest Zeile ein. + \item Wenn vorher \code{cin >> ...} benutzt, lese letztes \code{\\n} mit \code{getline(cin, x)}. + \end{itemize} + + \item \textbf{Gleitkommazahlen:} + \begin{itemize} + \item \code{NaN}? Evtl. ungültige Werte für mathematische Funktionen, z.B. \mbox{\code{acos(1.00000000000001)}}? + \item Falsches Runden bei negativen Zahlen? Abschneiden $\neq$ Abrunden! + \item genügend Präzision oder Output in wissenschaftlicher Notation (\code{1e-25})? + \item Kann \code{-0.000} ausgegeben werden? + \end{itemize} + + \item \textbf{Wrong Answer:} + \begin{itemize} + \item Lies Aufgabe erneut. Sorgfältig! + \item Mehrere Testfälle in einer Datei? Probiere gleichen Testcase mehrfach hintereinander. + \item Integer Overflow? Teste maximale Eingabegrößen und mache Überschlagsrechnung. + \item Ausgabeformat im 'unmöglich'-Fall überprüfen. + \item Ist das Ergebnis modulo einem Wert? + \item Integer Division rundet zur $0$ $\neq$ abrunden. + \item Eingabegrößen überprüfen. Sonderfälle ausprobieren. + \begin{itemize} + \item $n = 0$, $n = -1$, $n = 1$, $n = 2^{31}-1$, $n = -2^{31}$ + \item $n$ gerade/ungerade + \item Graph ist leer/enthält nur einen Knoten. + \item Liste ist leer/enthält nur ein Element. + \item Graph ist Multigraph (enthält Schleifen/Mehrfachkanten). + \item Sind Kanten gerichtet/ungerichtet? + \item Kolineare Punkte existieren. + \item Polygon ist konkav/selbstschneidend. + \end{itemize} + \item Bei DP/Rekursion: Stimmt Basisfall? + \item Unsicher bei benutzten STL-Funktionen? 
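A sketch of the Partition trick from the list above (an illustration, not part of the original patch): merge triples of equal weights into w and 2w, then run subset-sum on a bitset. MAXW bounds the total weight and is an assumption; runtime is roughly O(sqrt(W) * W / 64).

constexpr int MAXW = 100'001;                 // assumes total weight W < MAXW
bool subsetSum(const vector<int>& w, int x) {
    map<int, int> cnt;                        // weight -> multiplicity
    for (int v : w) cnt[v]++;
    bitset<MAXW> reach;
    reach[0] = 1;
    for (auto it = cnt.begin(); it != cnt.end(); ++it) {
        auto& [v, c] = *it;
        while (c > 2) c -= 2, cnt[2 * v]++;   // three copies of v -> v + 2v
        for (int i = 0; i < c; i++) reach |= reach << v;
    }
    return 0 <= x && x < MAXW && reach[x];
}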
+ \end{itemize} +\end{itemize} diff --git a/content/other/pbs.cpp b/content/other/pbs.cpp new file mode 100644 index 0000000..7cb60e5 --- /dev/null +++ b/content/other/pbs.cpp @@ -0,0 +1,19 @@ +// Q = # of queries, bucket sort is sometimes faster +vector low(Q, 0), high(Q, MAX_OPERATIONS); +while (true) { + vector> focus; + for (int i = 0; i < Q; i++) if (low[i] < high[i]) { + focus.emplace_back((low[i] + high[i]) / 2, i); + } + if (focus.empty()) break; + sort(all(focus)); + + // reset simulation + for (int step = 0; auto [mid, i] : focus) { + while (step <= mid) { + // simulation step + step++; + } + if (/* requirement already fulfilled */) high[i] = mid; + else low[i] = mid + 1; +}} // answer in low (and high) diff --git a/content/other/pragmas.cpp b/content/other/pragmas.cpp new file mode 100644 index 0000000..a39c850 --- /dev/null +++ b/content/other/pragmas.cpp @@ -0,0 +1,6 @@ +#pragma GCC optimize("Ofast") +#pragma GCC optimize ("unroll-loops") +#pragma GCC target("sse,sse2,sse3,ssse3,sse4," + "popcnt,abm,mmx,avx,tune=native") +#pragma GCC target("fpmath=sse,sse2") // no excess precision +#pragma GCC target("fpmath=387") // force excess precision diff --git a/content/other/sos.cpp b/content/other/sos.cpp new file mode 100644 index 0000000..01bc44c --- /dev/null +++ b/content/other/sos.cpp @@ -0,0 +1,6 @@ +vector res(in); +for (int i = 1; i < sz(res); i *= 2) { + for (int mask = 0; mask < sz(res); mask++){ + if (mask & i) { + res[mask] += res[mask ^ i]; +}}} diff --git a/content/other/split.cpp b/content/other/split.cpp new file mode 100644 index 0000000..5519f60 --- /dev/null +++ b/content/other/split.cpp @@ -0,0 +1,10 @@ +// Zerlegt s anhand aller Zeichen in delim (verändert s). +vector split(string& s, string delim) { + vector result; char *token; + token = strtok(s.data(), delim.c_str()); + while (token != nullptr) { + result.emplace_back(token); + token = strtok(nullptr, delim.c_str()); + } + return result; +} diff --git a/content/other/stress.sh b/content/other/stress.sh new file mode 100644 index 0000000..d264c2a --- /dev/null +++ b/content/other/stress.sh @@ -0,0 +1,7 @@ +for i in {1..1000}; do + printf "\r$i" + python3 gen.py > input # generate test with gen.py + ./a.out < input > out # execute ./a.out + ./b.out < input > out2 # execute ./b.out + diff out out2 || break +done diff --git a/content/other/stuff.cpp b/content/other/stuff.cpp new file mode 100644 index 0000000..41543ad --- /dev/null +++ b/content/other/stuff.cpp @@ -0,0 +1,29 @@ +// Alles-Header. +#include + +// Setzt deutsche Tastaturlayout / toggle mit alt + space +setxkbmap de +setxkbmap de,us -option grp:alt_space_toggle + +// Schnelle Ein-/Ausgabe mit cin/cout. +cin.tie(nullptr)->ios::sync_with_stdio(false); + +// Set mit eigener Sortierfunktion. +set set1(comp); + +// STL-Debugging, Compiler flags. +-D_GLIBCXX_DEBUG +#define _GLIBCXX_DEBUG + +// 128-Bit Integer/Float. Muss zum Einlesen/Ausgeben +// in einen int oder long long gecastet werden. 
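// Hedged addition (not in the original notes): if the value does not fit
// into a long long, it can instead be printed digit by digit.
void print128(__int128 x) {
    if (x < 0) { putchar('-'); x = -x; }
    if (x > 9) print128(x / 10);
    putchar(char('0' + int(x % 10)));
}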
+__int128, __float128 + +// float mit Decimaldarstellung +#include +std::decimal::decimal128 + +// 1e18 < INF < Max_Value / 2 +constexpr ll INF = 0x3FFF'FFFF'FFFF'FFFFll; +// 1e9 < INF < Max_Value / 2 +constexpr int INF = 0x3FFF'FFFF; diff --git a/content/other/timed.cpp b/content/other/timed.cpp new file mode 100644 index 0000000..b3ed4ef --- /dev/null +++ b/content/other/timed.cpp @@ -0,0 +1,3 @@ +int times = clock(); +//run for 900ms +while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) {...} diff --git a/content/python/io.py b/content/python/io.py new file mode 100644 index 0000000..aa16d4c --- /dev/null +++ b/content/python/io.py @@ -0,0 +1,3 @@ +n, m = map(int, input().split()) +A = list(map(int, input().split())) +print(n, m, *A) diff --git a/content/python/python.tex b/content/python/python.tex new file mode 100644 index 0000000..a778b85 --- /dev/null +++ b/content/python/python.tex @@ -0,0 +1,10 @@ +\section{Python} +\bgroup +\lstset{language=Python} + +\subsection{Recursion} +\sourcecode{python/recursion.py} + +\subsection{IO} +\sourcecode{python/io.py} +\egroup diff --git a/content/python/recursion.py b/content/python/recursion.py new file mode 100644 index 0000000..45e0147 --- /dev/null +++ b/content/python/recursion.py @@ -0,0 +1,2 @@ +import sys +sys.setrecursionlimit(1000_007) diff --git a/content/string/ahoCorasick.cpp b/content/string/ahoCorasick.cpp new file mode 100644 index 0000000..eac312c --- /dev/null +++ b/content/string/ahoCorasick.cpp @@ -0,0 +1,52 @@ +constexpr ll ALPHABET_SIZE = 26, OFFSET = 'a'; +struct AhoCorasick { + struct vert { + int suffix = 0, ch, cnt = 0; + array nxt = {}; + + vert(int p, int c) : suffix(-p), ch(c) {} + }; + vector aho = {{0, -1}}; + + int addString(string &s) { + int v = 0; + for (auto c : s) { + int idx = c - OFFSET; + if (!aho[v].nxt[idx]) { + aho[v].nxt[idx] = sz(aho); + aho.emplace_back(v, idx); + } + v = aho[v].nxt[idx]; + } + aho[v].cnt++; + return v; // trie node index of pattern (pattern state) + } + + int getSuffix(int v) { + if (aho[v].suffix < 0) { + aho[v].suffix = go(getSuffix(-aho[v].suffix), aho[v].ch); + } + return aho[v].suffix; + } + + int go(int v, int idx) { // Root is v=0, idx is char - OFFSET + if (aho[v].nxt[idx]) return aho[v].nxt[idx]; + else return v == 0 ? 
0 : go(getSuffix(v), idx); + } + + vector> adj; + vector dp; + void buildGraph() { + adj.resize(sz(aho)); + dp.assign(sz(aho), 0); + for (int i = 1; i < sz(aho); i++) { + adj[getSuffix(i)].push_back(i); + }} + + void dfs(int v = 0) { // dp on tree + for (int u : adj[v]) { + //dp[u] = dp[v] + aho[u].cnt; // pattern count + dfs(u); + dp[v] += dp[u]; // no of matches + }} +}; diff --git a/content/string/deBruijn.cpp b/content/string/deBruijn.cpp new file mode 100644 index 0000000..e829137 --- /dev/null +++ b/content/string/deBruijn.cpp @@ -0,0 +1,7 @@ +string deBruijn(int n, char mi = '0', char ma = '1') { + string res, c(1, mi); + do { + if (n % sz(c) == 0) res += c; + } while(next(c, n, mi, ma)); + return res; +} diff --git a/content/string/duval.cpp b/content/string/duval.cpp new file mode 100644 index 0000000..bf36cce --- /dev/null +++ b/content/string/duval.cpp @@ -0,0 +1,21 @@ +vector> duval(const string& s) { + vector> res; + for (int i = 0; i < sz(s);) { + int j = i + 1, k = i; + for (; j < sz(s) && s[k] <= s[j]; j++) { + if (s[k] < s[j]) k = i; + else k++; + } + while (i <= k) { + res.push_back({i, i + j - k}); + i += j - k; + }} + return res; +} + +int minrotation(const string& s) { + auto parts = duval(s+s); + for (auto [l, r] : parts) { + if (l < sz(s) && r >= sz(s)) { + return l; +}}} diff --git a/content/string/kmp.cpp b/content/string/kmp.cpp new file mode 100644 index 0000000..421479e --- /dev/null +++ b/content/string/kmp.cpp @@ -0,0 +1,20 @@ +vector kmpPreprocessing(const string& sub) { + vector b(sz(sub) + 1); + b[0] = -1; + for (int i = 0, j = -1; i < sz(sub);) { + while (j >= 0 && sub[i] != sub[j]) j = b[j]; + b[++i] = ++j; + } + return b; +} +vector kmpSearch(const string& s, const string& sub) { + vector result, pre = kmpPreprocessing(sub); + for (int i = 0, j = 0; i < sz(s);) { + while (j >= 0 && s[i] != sub[j]) j = pre[j]; + i++; j++; + if (j == sz(sub)) { + result.push_back(i - j); + j = pre[j]; + }} + return result; +} diff --git a/content/string/longestCommonSubsequence.cpp b/content/string/longestCommonSubsequence.cpp new file mode 100644 index 0000000..6c9ea44 --- /dev/null +++ b/content/string/longestCommonSubsequence.cpp @@ -0,0 +1,15 @@ +string lcss(const string& a, const string& b) { + vector> m(sz(a) + 1, vector(sz(b) + 1)); + for (int i = sz(a) - 1; i >= 0; i--) { + for (int j = sz(b) - 1; j >= 0; j--) { + if (a[i] == b[j]) m[i][j] = 1 + m[i+1][j+1]; + else m[i][j] = max(m[i+1][j], m[i][j+1]); + }} // Für die Länge: return m[0][0]; + string res; + for (int j = 0, i = 0; j < sz(b) && i < sz(a);) { + if (a[i] == b[j]) res += a[i++], j++; + else if (m[i][j+1] > m[i+1][j]) j++; + else i++; + } + return res; +} diff --git a/content/string/lyndon.cpp b/content/string/lyndon.cpp new file mode 100644 index 0000000..e44379b --- /dev/null +++ b/content/string/lyndon.cpp @@ -0,0 +1,11 @@ +bool next(string& s, int maxLen, char mi = '0', char ma = '1') { + for (int i = sz(s), j = sz(s); i < maxLen; i++) + s.push_back(s[i % j]); + while(!s.empty() && s.back() == ma) s.pop_back(); + if (s.empty()) { + s = mi; + return false; + } else { + s.back()++; + return true; +}} diff --git a/content/string/manacher.cpp b/content/string/manacher.cpp new file mode 100644 index 0000000..112bd55 --- /dev/null +++ b/content/string/manacher.cpp @@ -0,0 +1,20 @@ +vector manacher(const string& t) { + //transforms "aa" to ".a.a." 
to find even length palindromes + string s(sz(t) * 2 + 1, '.'); + for (int i = 0; i < sz(t); i++) s[2 * i + 1] = t[i]; + + int mid = 0, r = 0, n = sz(s); + vector pal(n); + for (int i = 1; i < n - 1; i++) { + if (r > i) pal[i] = min(r - i, pal[2 * mid - i]); + while (pal[i] < min(i, n - i - 1) && + s[i + pal[i] + 1] == s[i - pal[i] - 1]) { + pal[i]++; + } + if (i + pal[i] > r) mid = i, r = i + pal[i]; + } + + //convert lengths to constructed string s (optional) + //for (int i = 0; i < n; i++) pal[i] = 2 * pal[i] + 1; + return pal; +} diff --git a/content/string/rollingHash.cpp b/content/string/rollingHash.cpp new file mode 100644 index 0000000..6e914aa --- /dev/null +++ b/content/string/rollingHash.cpp @@ -0,0 +1,18 @@ +// M = 1.7e9 + 9, 1e18L + 9, 2.2e18L + 7 +struct Hash { + static constexpr ll M = 3e18L + 37; + static constexpr ll Q = 318LL << 53; // Random in [SIGMA+1, M) + vector pref = {0}, power = {1}; + + Hash(const string& s) { + for (auto c : s) { // c > 0 + pref.push_back((mul(pref.back(), Q) + c + M) % M); + power.push_back(mul(power.back(), Q)); + }} + + ll operator()(int l, int r) { + return (pref[r] - mul(power[r-l], pref[l]) + M) % M; + } + + static ll mul(__int128 a, ll b) {return a * b % M;} +}; diff --git a/content/string/rollingHashCf.cpp b/content/string/rollingHashCf.cpp new file mode 100644 index 0000000..84b2e4e --- /dev/null +++ b/content/string/rollingHashCf.cpp @@ -0,0 +1,17 @@ +// M = 1.7e9 + 9, 1e18L + 9, 2.2e18L + 7 +struct Hash { + static constexpr ll M = 3e18L + 37; + vector pref = {0}, power = {1}; + + Hash(const string& s, ll Q) { // Q Random in [SIGMA+1, M) + for (auto c : s) { // c > 0 + pref.push_back((mul(pref.back(), Q) + c + M) % M); + power.push_back(mul(power.back(), Q)); + }} + + ll operator()(int l, int r) { + return (pref[r] - mul(power[r-l], pref[l]) + M) % M; + } + + static ll mul(__int128 a, ll b) {return a * b % M;} +}; diff --git a/content/string/string.tex b/content/string/string.tex new file mode 100644 index 0000000..bedabfb --- /dev/null +++ b/content/string/string.tex @@ -0,0 +1,132 @@ +\section{Strings} + +\begin{algorithm}{\textsc{Knuth-Morris-Pratt}-Algorithmus} + \begin{methods} + \method{kmpSearch}{sucht \code{sub} in \code{s}}{\abs{s}+\abs{sub}} + \end{methods} + \sourcecode{string/kmp.cpp} +\end{algorithm} + +\begin{algorithm}{Z-Algorithmus} + \begin{methods}[ll] + $z_i\coloneqq$ Längstes gemeinsames Präfix von $s_0\cdots s_{n-1}$ und $s_i\cdots s_{n-1}$ & \runtime{n} + \end{methods} + Suchen: Z-Algorithmus auf \code{P\$S} ausführen, Positionen mit $z_i=\abs{P}$ zurückgeben + \sourcecode{string/z.cpp} +\end{algorithm} + +\begin{algorithm}{Rolling Hash} + \sourcecode{string/rollingHash.cpp} +\end{algorithm} + +\begin{algorithm}{Pattern Matching mit Wildcards} + Gegeben zwei strings $A$ und $B$,$B$ enthält $k$ \emph{wildcards} enthält. Sei: + \begin{align*} + a_i&=\cos(\alpha_i) + i\sin(\alpha_i) &\text{ mit } \alpha_i&=\frac{2\pi A[i]}{\Sigma}\\ + b_i&=\cos(\beta_i) + i\sin(\beta_i) &\text{ mit } \beta_i&=\begin{cases*} + \frac{2\pi B[\abs{B}-i-1]}{\Sigma} & falls $B[\abs{B}-i-1]\in\Sigma$ \\ + 0 & sonst + \end{cases*} + \end{align*} + $B$ matcht $A$ an stelle $i$ wenn $(b\cdot a)[|B|-1+i]=|B|-k$. + Benutze FFT um $(b\cdot a)$ zu berechnen. 
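A sketch of this wildcard matching via FFT (an illustration, not part of the original patch). It assumes the cplx type and fft() from the transforms section, a lowercase alphabet of size 26, and '?' as the wildcard; it uses the common convention in which wildcard positions of B contribute 0 and B's characters enter conjugated, so an exact match at position i yields a convolution value of exactly |B|-k:

vector<int> wildcardMatch(const string& A, const string& B, char wc = '?') {
    const double PI = acos(-1.0), SIGMA = 26;   // alphabet size (assumption)
    auto ang = [&](char c) { return 2 * PI * (c - 'a') / SIGMA; };
    int n = 1 << (__lg(sz(A) + sz(B) - 1) + 1);
    vector<cplx> a(n), b(n);
    for (int i = 0; i < sz(A); i++) a[i] = {cos(ang(A[i])), sin(ang(A[i]))};
    int k = 0;
    for (int i = 0; i < sz(B); i++) {
        char c = B[sz(B) - 1 - i];              // reversed pattern
        if (c == wc) k++;                       // wildcard => b[i] stays 0
        else b[i] = {cos(ang(c)), -sin(ang(c))};// conjugated root of unity
    }
    fft(a), fft(b);
    for (int i = 0; i < n; i++) a[i] *= b[i];
    fft(a, true);                               // a now holds the convolution
    vector<int> res;
    for (int i = 0; i + sz(B) <= sz(A); i++) {  // exact value would be |B| - k;
        // the 0.1 margin guards FFT rounding only, verify candidates if in doubt
        if (abs(a[i + sz(B) - 1] - cplx((double)(sz(B) - k), 0)) < 0.1)
            res.push_back(i);
    }
    return res;                                 // matching start positions in A
}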
+\end{algorithm} + +\begin{algorithm}{\textsc{Manacher}'s Algorithm, Longest Palindrome} + \begin{methods} + \method{init}{transformiert \code{string a}}{n} + \method{manacher}{berechnet Längen der Palindrome in longest}{n} + \end{methods} + \sourcecode{string/manacher.cpp} +\end{algorithm} + +\begin{algorithm}{Longest Common Subsequence} + \begin{methods} + \method{lcss}{findet längste gemeinsame Sequenz}{\abs{a}\*\abs{b}} + \end{methods} + \sourcecode{string/longestCommonSubsequence.cpp} +\end{algorithm} + +\columnbreak +\begin{algorithm}{\textsc{Aho-Corasick}-Automat} + \begin{methods}[ll] + sucht patterns im Text & \runtime{\abs{Text}+\sum\abs{pattern}} + \end{methods} + \begin{enumerate} + \item mit \code{addString(pattern, idx)} Patterns hinzufügen. + \item rufe \code{buildGraph()} auf + \item mit \code{state = go(state, idx)} in nächsten Zustand wechseln. + \item erhöhe dabei \code{dp[state]++} + \item rufe \code{dfs()} auf. In dp[pattern state] stehen die Anzahl der Matches + \end{enumerate} + \sourcecode{string/ahoCorasick.cpp} +\end{algorithm} +\clearpage + +\begin{algorithm}{Lyndon und De-Bruijn} + \begin{itemize} + \item \textbf{Lyndon-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen. + \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von Lyndon-Worten zerlegt werden. + \item Für Lyndon-Worte $u, v$ mit $u SA, LCP; + vector> P; + + SuffixArray(const string& s) : n(sz(s)), SA(n), LCP(n), + P(__lg(2 * n - 1) + 1, vector(n)) { + P[0].assign(all(s)); + iota(all(SA), 0); + sort(all(SA), [&](int a, int b) {return s[a] < s[b];}); + vector x(n); + for (int k = 1, c = 1; c < n; k++, c *= 2) { + iota(all(x), n - c); + for (int ptr = c; int i : SA) if (i >= c) x[ptr++] = i - c; + + vector cnt(k == 1 ? MAX_CHAR : n); + for (int i : P[k-1]) cnt[i]++; + partial_sum(all(cnt), begin(cnt)); + for (int i : x | views::reverse) SA[--cnt[P[k-1][i]]] = i; + + auto p = [&](int i) {return i < n ? 
P[k-1][i] : -1;}; + for (int i = 1; i < n; i++) { + int a = SA[i-1], b = SA[i]; + P[k][b] = P[k][a] + (p(a) != p(b) || p(a+c) != p(b+c)); + }} + for (int i = 1; i < n; i++) LCP[i] = lcp(SA[i-1], SA[i]); + } + + int lcp(int x, int y) {//x & y are text-indices, not SA-indices + if (x == y) return n - x; + int res = 0; + for (int i = sz(P) - 1; i >= 0 && max(x, y) + res < n; i--) { + if (P[i][x + res] == P[i][y + res]) res |= 1 << i; + } + return res; + } +}; diff --git a/content/string/suffixAutomaton.cpp b/content/string/suffixAutomaton.cpp new file mode 100644 index 0000000..9a68cb3 --- /dev/null +++ b/content/string/suffixAutomaton.cpp @@ -0,0 +1,63 @@ +constexpr int ALPHABET_SIZE = 26; +constexpr char OFFSET = 'a'; +struct SuffixAutomaton { + struct State { + int len, link = -1; + array nxt; // map if large Alphabet + State(int l) : len(l) {fill(all(nxt), -1);} + }; + + vector st = {State(0)}; + int cur = 0; + + SuffixAutomaton(const string& s) { + st.reserve(2 * sz(s)); + for (auto c : s) extend(c - OFFSET); + } + + void extend(int c) { + int p = cur; + cur = sz(st); + st.emplace_back(st[p].len + 1); + for (; p != -1 && st[p].nxt[c] < 0; p = st[p].link) { + st[p].nxt[c] = cur; + } + if (p == -1) { + st[cur].link = 0; + } else { + int q = st[p].nxt[c]; + if (st[p].len + 1 == st[q].len) { + st[cur].link = q; + } else { + st.emplace_back(st[p].len + 1); + st.back().link = st[q].link; + st.back().nxt = st[q].nxt; + for (; p != -1 && st[p].nxt[c] == q; p = st[p].link) { + st[p].nxt[c] = sz(st) - 1; + } + st[q].link = st[cur].link = sz(st) - 1; + }}} + + vector calculateTerminals() { + vector terminals; + for (int p = cur; p != -1; p = st[p].link) { + terminals.push_back(p); + } + return terminals; + } + + // Pair with start index (in t) and length of LCS. 
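// The loop below keeps v = current state and l = current match length:
// on a missing transition it follows suffix links (l drops to st[v].len),
// otherwise it extends the match by one and records the best end position.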
+ pair longestCommonSubstring(const string& t) { + int v = 0, l = 0, best = 0, bestp = -1; + for (int i = 0; i < sz(t); i++) { + int c = t[i] - OFFSET; + while (v > 0 && st[v].nxt[c] < 0) { + v = st[v].link; + l = st[v].len; + } + if (st[v].nxt[c] >= 0) v = st[v].nxt[c], l++; + if (l > best) best = l, bestp = i; + } + return {bestp - best + 1, best}; + } +}; diff --git a/content/string/suffixTree.cpp b/content/string/suffixTree.cpp new file mode 100644 index 0000000..7112f39 --- /dev/null +++ b/content/string/suffixTree.cpp @@ -0,0 +1,72 @@ +struct SuffixTree { + struct Vert { + int start, end, suf; //s[start...end) along parent edge + map nxt; + }; + string s; + int needsSuffix, pos, remainder, curVert, curEdge, curLen; + // Each Vertex gives its children range as [start, end) + vector tree = {Vert{-1, -1, 0, {}}}; + + SuffixTree(const string& s_) : s(s_) { + needsSuffix = remainder = curVert = curEdge = curLen = 0; + pos = -1; + for (int i = 0; i < sz(s); i++) extend(); + } + + int newVert(int start, int end) { + tree.push_back({start, end, 0, {}}); + return sz(tree) - 1; + } + + void addSuffixLink(int vert) { + if (needsSuffix) tree[needsSuffix].suf = vert; + needsSuffix = vert; + } + + bool fullImplicitEdge(int vert) { + int len = min(tree[vert].end, pos + 1) - tree[vert].start; + if (curLen >= len) { + curEdge += len; + curLen -= len; + curVert = vert; + return true; + } else { + return false; + }} + + void extend() { + pos++; + needsSuffix = 0; + remainder++; + while (remainder) { + if (curLen == 0) curEdge = pos; + if (!tree[curVert].nxt.count(s[curEdge])) { + int leaf = newVert(pos, sz(s)); + tree[curVert].nxt[s[curEdge]] = leaf; + addSuffixLink(curVert); + } else { + int nxt = tree[curVert].nxt[s[curEdge]]; + if (fullImplicitEdge(nxt)) continue; + if (s[tree[nxt].start + curLen] == s[pos]) { + curLen++; + addSuffixLink(curVert); + break; + } + int split = newVert(tree[nxt].start, + tree[nxt].start + curLen); + tree[curVert].nxt[s[curEdge]] = split; + int leaf = newVert(pos, sz(s)); + tree[split].nxt[s[pos]] = leaf; + tree[nxt].start += curLen; + tree[split].nxt[s[tree[nxt].start]] = nxt; + addSuffixLink(split); + } + remainder--; + if (curVert == 0 && curLen) { + curLen--; + curEdge = pos - remainder + 1; + } else { + curVert = tree[curVert].suf ? tree[curVert].suf : 0; + }}} +}; \ No newline at end of file diff --git a/content/string/trie.cpp b/content/string/trie.cpp new file mode 100644 index 0000000..03cf947 --- /dev/null +++ b/content/string/trie.cpp @@ -0,0 +1,35 @@ +// Zahlenwerte müssen bei 0 beginnen und zusammenhängend sein. 
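// words counts the stored words whose path passes through a node,
// ends counts the words ending there; traverse(word, x) adds x to these
// counters along the path (x = 0 only walks and returns the node).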
+constexpr int ALPHABET_SIZE = 2; +struct node { + int words, ends; + array nxt; + node() : words(0), ends(0) {fill(all(nxt), -1);} +}; +vector trie = {node()}; + +int traverse(const vector& word, int x) { + int id = 0; + for (int c : word) { + if (id < 0 || (trie[id].words == 0 && x <= 0)) return -1; + trie[id].words += x; + if (trie[id].nxt[c] < 0 && x > 0) { + trie[id].nxt[c] = sz(trie); + trie.emplace_back(); + } + id = trie[id].nxt[c]; + } + trie[id].words += x; + trie[id].ends += x; + return id; +} + +int insert(const vector& word) { + return traverse(word, 1); +} + +bool erase(const vector& word) { + int id = traverse(word, 0); + if (id < 0 || trie[id].ends <= 0) return false; + traverse(word, -1); + return true; +} diff --git a/content/string/z.cpp b/content/string/z.cpp new file mode 100644 index 0000000..069fa38 --- /dev/null +++ b/content/string/z.cpp @@ -0,0 +1,10 @@ +vector Z(const string& s) { + int n = sz(s); + vector z(n); + for (int i = 1, x = 0; i < n; i++) { + z[i] = max(0, min(z[i - x], x + z[x] - i)); + while (i + z[i] < n && s[z[i]] == s[i + z[i]]) { + x = i, z[i]++; + }} + return z; +} diff --git a/content/tcr.tex b/content/tcr.tex new file mode 100644 index 0000000..b327b37 --- /dev/null +++ b/content/tcr.tex @@ -0,0 +1,65 @@ + +%maybe size 9pt if too many pages +\documentclass[a4paper,fontsize=7.8pt]{scrartcl} + +% General information. +\newcommand{\teamname}{Kindergarten Timelimit} +\newcommand{\university}{Karlsruhe Institute of Technology} + +% Options +\newif\ifoptional +%\optionaltrue + +% Font encoding. +\usepackage[T1]{fontenc} +\usepackage[ngerman]{babel} +\usepackage[utf8]{inputenc} +\usepackage[hidelinks,pdfencoding=auto]{hyperref} + +% Include headers. +\usepackage{latexHeaders/layout} +\usepackage{latexHeaders/math} +\usepackage{latexHeaders/code} +\usepackage{latexHeaders/commands} + +% Title and author information. +\title{Team Contest Reference} +\author{\teamname \\ \university} +\date{\today} +\begin{document} + +% Titlepage with table of contents. +\setlength{\columnsep}{1cm} +\optional{ +\maketitle +\begin{multicols*}{3} + \tableofcontents +\end{multicols*} +} + +\newpage + +% Content. 
+\begin{multicols*}{3} + \input{datastructures/datastructures} + \input{graph/graph} + \input{geometry/geometry} + \input{math/math} +\end{multicols*} + \clearpage + \input{math/tables} +\begin{multicols*}{3} + \input{string/string} + \input{python/python} + \input{other/other} + \input{template/template} + \clearpage + \ifodd\value{page} + \else + \null + \thispagestyle{empty} + \clearpage + \fi + \input{tests/test} +\end{multicols*} +\end{document} diff --git a/content/template/console.sh b/content/template/console.sh new file mode 100644 index 0000000..31885e9 --- /dev/null +++ b/content/template/console.sh @@ -0,0 +1,2 @@ +alias comp="g++ -std=gnu++17 -O2 -Wall -Wextra -Wconversion -Wshadow" +alias dbg="comp -g -fsanitize=address,undefined" diff --git a/content/template/template.cpp b/content/template/template.cpp new file mode 100644 index 0000000..c9a492c --- /dev/null +++ b/content/template/template.cpp @@ -0,0 +1,17 @@ +#include +using namespace std; + +#define tsolve int t; cin >> t; while(t--) solve +#define all(x) ::begin(x), ::end(x) +#define sz(x) (ll)::size(x) + +using ll = long long; +using ld = long double; + +void solve() {} + +int main() { + cin.tie(0)->sync_with_stdio(false); + cout << setprecision(16); + solve(); +} diff --git a/content/template/template.tex b/content/template/template.tex new file mode 100644 index 0000000..bf82199 --- /dev/null +++ b/content/template/template.tex @@ -0,0 +1,9 @@ +\section{Template} + +\begin{algorithm}{C++} + \sourcecode{template/template.cpp} +\end{algorithm} + +\begin{algorithm}{Console} + \sourcecode{template/console.sh} +\end{algorithm} diff --git a/content/tests/gcc5bug.cpp b/content/tests/gcc5bug.cpp new file mode 100644 index 0000000..f49603e --- /dev/null +++ b/content/tests/gcc5bug.cpp @@ -0,0 +1,4 @@ +//https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68203 +struct A { + pair values[1000000]; +}; diff --git a/content/tests/precision.cpp b/content/tests/precision.cpp new file mode 100644 index 0000000..0c81ae1 --- /dev/null +++ b/content/tests/precision.cpp @@ -0,0 +1,8 @@ +#include + +int main() { + cout << "Mode: " << FLT_EVAL_METHOD << endl; + double a = atof("1.2345678"); + double b = a*a; + cout << b - 1.52415765279683990130 << '\n'; +} diff --git a/content/tests/test.tex b/content/tests/test.tex new file mode 100644 index 0000000..80ac037 --- /dev/null +++ b/content/tests/test.tex @@ -0,0 +1,43 @@ +\section{Tests} +Dieser Abschnitt enthält lediglich Dinge die während der Practicesession getestet werden sollten! + +\subsection{GCC} +\begin{itemize} + \item sind c++14 Feature vorhanden? + \item sind c++17 Feature vorhanden? + \item kompiliert dieser Code: +\end{itemize} +\sourcecode{tests/gcc5bug.cpp} +\begin{itemize} + \item funktioniert \code{__int128}? + \item funktionieren Pragmas? + \item funktionieren \code{constexpr} zur Compilezeit (+Zeitlimit)? + \item wie groß ist \code{sizeof(char*)}? + \item wie groß ist \code{RAND_MAX}? + \item funktioniert \code{random_device}? (und gib es unerschiedliche Ergebnisse?) + \item funktioniert \code{clock()}? +\end{itemize} + +\subsection{Python} +\begin{itemize} + \item Rekursionslimit? +\end{itemize} + +\subsection{Judge} +\begin{itemize} + \item ist der Checker casesensitive? + \item wie werden zusätzliches Whitespacecharacter bei sonst korrektem Output behandelt? + \item vergleiche ausführungszeit auf dem judge und lokal (z.b. 
mit Primzahl Sieb) +\end{itemize} +\sourcecode{tests/whitespace.cpp} + +\subsection{Precision} +\begin{itemize} + \item Mode $0$ means no excess precision + \item Mode $2$ means excess precision (all operations in $80$\,bit floats) +\end{itemize} +\begin{itemize} + \item Result $0$ without excess precision (expected floating point error) + \item \textasciitilde$8e^{-17}$ with excess precision (real value) +\end{itemize} +\sourcecode{tests/precision.cpp} diff --git a/content/tests/whitespace.cpp b/content/tests/whitespace.cpp new file mode 100644 index 0000000..d4abf47 --- /dev/null +++ b/content/tests/whitespace.cpp @@ -0,0 +1 @@ +"\r\r\r\n\t \r\n\r" diff --git a/datastructures/LCT.cpp b/datastructures/LCT.cpp deleted file mode 100644 index c1dd278..0000000 --- a/datastructures/LCT.cpp +++ /dev/null @@ -1,178 +0,0 @@ -constexpr ll queryDefault = 0; -constexpr ll updateDefault = 0; - -ll _modify(ll x, ll y) { - return x + y; -} - -ll _query(ll x, ll y) { - return x + y; -} - -ll _update(ll delta, int length) { - if (delta == updateDefault) return updateDefault; - //ll result = delta - //for (int i=1; ileft != this && - parent->right != this); - } - - void push() { - if (revert) { - revert = false; - swap(left, right); - if (left) left->revert ^= 1; - if (right) right->revert ^= 1; - } - nodeValue = joinValueDelta(nodeValue, delta); - subTreeValue = joinValueDelta(subTreeValue, - _update(delta, size)); - if (left) left->delta = joinDeltas(left->delta, delta); - if (right) right->delta = joinDeltas(right->delta, delta); - delta = updateDefault; - } - - ll getSubtreeValue() { - return joinValueDelta(subTreeValue, _update(delta, size)); - } - - void update() { - subTreeValue = joinValueDelta(nodeValue, delta); - size = 1; - if (left) { - subTreeValue = _query(subTreeValue, - left->getSubtreeValue()); - size += left->size; - } - if (right) { - subTreeValue = _query(subTreeValue, - right->getSubtreeValue()); - size += right->size; - }} - }; - - vector nodes; - - LCT(int n) : nodes(n) { - for (int i = 0; i < n; i++) nodes[i].id = i; - } - - void connect(Node* ch, Node* p, int isLeftChild) { - if (ch) ch->parent = p; - if (isLeftChild >= 0) { - if (isLeftChild) p->left = ch; - else p->right = ch; - }} - - void rotate(Node* x) { - Node* p = x->parent; - Node* g = p->parent; - bool isRootP = p->isRoot(); - bool leftChildX = (x == p->left); - - connect(leftChildX ? x->right : x->left, p, leftChildX); - connect(p, x, !leftChildX); - connect(x, g, isRootP ? -1 : p == g->left); - p->update(); - } - - void splay(Node* x) { - while (!x->isRoot()) { - Node* p = x->parent; - Node* g = p->parent; - if (!p->isRoot()) g->push(); - p->push(); - x->push(); - if (!p->isRoot()) rotate((x == p->left) == - (p == g->left) ? p : x); - rotate(x); - } - x->push(); - x->update(); - } - - Node* expose(Node* x) { - Node* last = nullptr; - for (Node* y = x; y; y = y->parent) { - splay(y); - y->left = last; - last = y; - } - splay(x); - return last; - } - - void makeRoot(Node* x) { - expose(x); - x->revert ^= 1; - } - - bool connected(Node* x, Node* y) { - if (x == y) return true; - expose(x); - expose(y); - return x->parent; - } - - void link(Node* x, Node* y) { - assert(!connected(x, y)); // not yet connected! - makeRoot(x); - x->parent = y; - } - - void cut(Node* x, Node* y) { - makeRoot(x); - expose(y); - //must be a tree edge! 
- assert(!(y->right != x || x->left != nullptr)); - y->right->parent = nullptr; - y->right = nullptr; - } - - Node* lca(Node* x, Node* y) { - assert(connected(x, y)); - expose(x); - return expose(y); - } - - ll query(Node* from, Node* to) { - makeRoot(from); - expose(to); - if (to) return to->getSubtreeValue(); - return queryDefault; - } - - void modify(Node* from, Node* to, ll delta) { - makeRoot(from); - expose(to); - to->delta = joinDeltas(to->delta, delta); - } -}; diff --git a/datastructures/RMQ.cpp b/datastructures/RMQ.cpp deleted file mode 100644 index 401cca4..0000000 --- a/datastructures/RMQ.cpp +++ /dev/null @@ -1,27 +0,0 @@ -vector values; -vector> rmq; - -int select(int a, int b) { - return values[a] <= values[b] ? a : b; -} - -void build() { - for(int i = 0, s = 1, ss = 1; s <= sz(values); ss=s, s*=2, i++) { - for(int l = 0; l + s <= sz(values); l++) { - if(i == 0) rmq[0][l] = l; - else { - int r = l + ss; - rmq[i][l] = select(rmq[i-1][l], rmq[i-1][r]]); -}}}} - -void init(const vector& v) { - values = v; - rmq = vector>(__lg(sz(values))+1, vector(sz(values))); - build(); -} - -int query(int l, int r) { - if(l >= r) return l; - int s = __lg(r-l); r = r - (1 << s); - return select(rmq[s][l],rmq[s][r]); -} diff --git a/datastructures/bitset.cpp b/datastructures/bitset.cpp deleted file mode 100644 index a89746c..0000000 --- a/datastructures/bitset.cpp +++ /dev/null @@ -1,7 +0,0 @@ -bitset<10> bits(0b000010100); -bits._Find_first(); //2 -bits._Find_next(2); //4 -bits._Find_next(4); //10 bzw. N -bits[x] = 1; //not bits.set(x) or bits.reset(x)! -bits[x].flip(); //not bits.flip(x)! -bits.count() //number of set bits diff --git a/datastructures/datastructures.tex b/datastructures/datastructures.tex deleted file mode 100644 index d35dfb0..0000000 --- a/datastructures/datastructures.tex +++ /dev/null @@ -1,136 +0,0 @@ -\section{Datenstrukturen} - -\begin{algorithm}{Segmentbaum} - \begin{methods} - \method{SegTree}{baut den Baum auf}{n} - \method{query}{findet Summe über [l, r)}{\log(n)} - \method{update}{ändert einen Wert}{\log(n)} - \end{methods} - \sourcecode{datastructures/segmentTree.cpp} - - \subsubsection{Lazy Propagation} - Assignment modifications, sum queries \\ - \method{lower\_bound}{erster Index in [l, r) $\geq$ x (erfordert max-combine)}{\log(n)} - \sourcecode{datastructures/lazyPropagation.cpp} -\end{algorithm} - -\begin{algorithm}{Wavelet Tree} - \begin{methods} - \method{Constructor}{baut den Baum auf}{n\*\log(n)} - \method{kth}{sort $[l, r)[k]$}{\log(n)} - \method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(n)} - \end{methods} - \sourcecode{datastructures/waveletTree.cpp} -\end{algorithm} -\columnbreak - -\begin{algorithm}{Fenwick Tree} - \begin{methods} - \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von [0, i]}{\log(n)} - \method{update}{addiert ein Delta zu einem Element}{\log(n)} - \end{methods} - \sourcecode{datastructures/fenwickTree.cpp} - - \begin{methods} - \method{init}{baut den Baum auf}{n\*\log(n)} - \method{prefix\_sum}{summe von [0, i]}{\log(n)} - \method{update}{addiert ein Delta zu allen Elementen [l, r)}{\log(n)} - \end{methods} - \sourcecode{datastructures/fenwickTree2.cpp} -\end{algorithm} - -\begin{algorithm}{STL-Rope (Implicit Cartesian Tree)} - \sourcecode{datastructures/stlRope.cpp} -\end{algorithm} -\columnbreak - -\begin{algorithm}{(Implicit) Treap (Cartesian Tree)} - \begin{methods} - \method{insert}{fügt wert $\mathit{val}$ an stelle $i$ ein (verschiebt alle Positionen >= $i$)}{\log(n)} - 
\method{remove}{löscht werte $[i,i+\mathit{count})$}{\log(n)} - \end{methods} - \sourcecode{datastructures/treap2.cpp} -\end{algorithm} - -\begin{algorithm}{Range Minimum Query} - \begin{methods} - \method{init}{baut Struktur auf}{n\*\log(n)} - \method{queryIdempotent}{Index des Minimums in [l, r)}{1} - \end{methods} - \begin{itemize} - \item \code{better}-Funktion muss idempotent sein! - \end{itemize} - \sourcecode{datastructures/sparseTable.cpp} -\end{algorithm} - -\begin{algorithm}{STL-Bitset} - \sourcecode{datastructures/bitset.cpp} -\end{algorithm} - -\begin{algorithm}{Link-Cut-Tree} - \begin{methods} - \method{Constructor}{baut Wald auf}{n} - \method{connected}{prüft ob zwei Knoten im selben Baum liegen}{\log(n)} - \method{link}{fügt $\{x,y\}$ Kante ein}{\log(n)} - \method{cut}{entfernt $\{x,y\}$ Kante}{\log(n)} - \method{lca}{berechnet LCA von $x$ und $y$}{\log(n)} - \method{query}{berechnet \code{query} auf den Knoten des $xy$-Pfades}{\log(n)} - \method{modify}{erhöht jeden wert auf dem $xy$-Pfad}{\log(n)} - \end{methods} - \sourcecode{datastructures/LCT.cpp} -\end{algorithm} -\clearpage - -\begin{algorithm}{Lichao} - \sourcecode{datastructures/lichao.cpp} -\end{algorithm} - -\begin{algorithm}{Policy Based Data Structures} - \textbf{Wichtig:} Verwende \code{p.swap(p2)} anstatt \code{swap(p, p2)}! - \sourcecode{datastructures/stlPriorityQueue.cpp} - \columnbreak - \sourcecode{datastructures/pbds.cpp} -\end{algorithm} - -\begin{algorithm}{Lower/Upper Envelope (Convex Hull Optimization)} - Um aus einem lower envelope einen upper envelope zu machen (oder umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren. - \sourcecode{datastructures/monotonicConvexHull.cpp} - \sourcecode{datastructures/dynamicConvexHull.cpp} -\end{algorithm} - -\begin{algorithm}{Union-Find} - \begin{methods} - \method{init}{legt $n$ einzelne Unions an}{n} - \method{findSet}{findet den Repräsentanten}{\log(n)} - \method{unionSets}{vereint 2 Mengen}{\log(n)} - \method{m\*findSet + n\*unionSets}{Folge von Befehlen}{n+m\*\alpha(n)} - \end{methods} - \sourcecode{datastructures/unionFind.cpp} -\end{algorithm} -\columnbreak - -\begin{algorithm}{Persistent} - \begin{methods} - \method{get}{berechnet Wert zu Zeitpunkt $t$}{\log(t)} - \method{set}{ändert Wert zu Zeitpunkt $t$}{\log(t)} - \method{reset}{setzt die Datenstruktur auf Zeitpunkt $t$}{1} - \end{methods} - \sourcecode{datastructures/persistent.cpp} - \sourcecode{datastructures/persistentArray.cpp} -\end{algorithm} - -\begin{algorithm}[optional]{Range Minimum Query} - \begin{methods} - \method{init}{baut Struktur auf}{n\*\log(n)} - \method{query}{Index des Minimums in [l, r)}{1} - \end{methods} - \sourcecode{datastructures/RMQ.cpp} -\end{algorithm} - -\begin{algorithm}[optional]{Erste unbenutzte natürliche Zahl} - \begin{methods} - \method{get\_first\_unused}{findet kleinste unbenutzte Zahl}{\log(n)} - \end{methods} - \sourcecode{datastructures/firstUnused.cpp} -\end{algorithm} diff --git a/datastructures/dynamicConvexHull.cpp b/datastructures/dynamicConvexHull.cpp deleted file mode 100644 index d669847..0000000 --- a/datastructures/dynamicConvexHull.cpp +++ /dev/null @@ -1,36 +0,0 @@ -struct Line { - mutable ll m, b, p; - bool operator<(const Line& o) const {return m < o.m;} - bool operator<(ll x) const {return p < x;} -}; - -struct HullDynamic : multiset> { - // (for doubles, use inf = 1/.0, div(a,b) = a/b) - ll div(ll a, ll b) {return a / b - ((a ^ b) < 0 && a % b);} - - bool isect(iterator x, iterator y) { - if (y == end()) {x->p = INF; return 
false;} - if (x->m == y->m) x->p = x->b > y->b ? INF : -INF; - else x->p = div(y->b - x->b, x->m - y->m); - return x->p >= y->p; - } - - void add(ll m, ll b) { - auto x = insert({m, b, 0}); - while (isect(x, next(x))) erase(next(x)); - if (x != begin()) { - x--; - if (isect(x, next(x))) { - erase(next(x)); - isect(x, next(x)); - }} - while (x != begin() && prev(x)->p >= x->p) { - x--; - isect(x, erase(next(x))); - }} - - ll query(ll x) { - auto l = *lower_bound(x); - return l.m * x + l.b; - } -}; diff --git a/datastructures/fenwickTree.cpp b/datastructures/fenwickTree.cpp deleted file mode 100644 index cac3cf8..0000000 --- a/datastructures/fenwickTree.cpp +++ /dev/null @@ -1,15 +0,0 @@ -vector tree; - -void update(int i, ll val) { - for (i++; i < sz(tree); i += (i & (-i))) tree[i] += val; -} - -void init(int n) { - tree.assign(n + 1,0); -} - -ll prefix_sum(int i) { - ll sum = 0; - for (i++; i > 0; i -= (i & (-i))) sum += tree[i]; - return sum; -} diff --git a/datastructures/fenwickTree2.cpp b/datastructures/fenwickTree2.cpp deleted file mode 100644 index ff87e2e..0000000 --- a/datastructures/fenwickTree2.cpp +++ /dev/null @@ -1,21 +0,0 @@ -vector add, mul; - -void update(int l, int r, ll val) { - for (int tl = l + 1; tl < sz(add); tl += tl&(-tl)) - add[tl] += val, mul[tl] -= val * l; - for (int tr = r + 1; tr < sz(add); tr += tr&(-tr)) - add[tr] -= val, mul[tr] += val * r; -} - -void init(vector& v) { - mul.assign(sz(v) + 1,0); - add.assign(sz(v) + 1,0); - for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]); -} - -ll prefix_sum (int i) { - ll res = 0; i++; - for (int ti = i; ti > 0; ti -= ti&(-ti)) - res += add[ti] * i + mul[ti]; - return res; -} diff --git a/datastructures/firstUnused.cpp b/datastructures/firstUnused.cpp deleted file mode 100644 index 16b0c21..0000000 --- a/datastructures/firstUnused.cpp +++ /dev/null @@ -1,13 +0,0 @@ -// Erste natürliche Zahl nicht im set used. -set used; -int unusedCounter = 1; - -int get_first_unused() { // Laufzeit: O(log n) amortisiert. 
- auto it = used.lower_bound(unusedCounter); - while (it != used.end() && *it == unusedCounter) { - it++; - unusedCounter++; - } - used.insert(unusedCounter); - return unusedCounter++; -} diff --git a/datastructures/lazyPropagation.cpp b/datastructures/lazyPropagation.cpp deleted file mode 100644 index 0fe7bbe..0000000 --- a/datastructures/lazyPropagation.cpp +++ /dev/null @@ -1,83 +0,0 @@ -struct SegTree { - using T = ll; using U = ll; - int n, h; - static constexpr T E = 0; // Neutral element for combine - static constexpr U UF = 0; // Unused value by updates - vector tree; vector lazy; - vector k; // size of segments (optional) - - SegTree(const vector& a) : n(sz(a) + 1), tree(2 * n, E), - //SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def), - h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) { - copy(all(a), tree.begin() + n); - for (int i = n - 1; i > 0; i--) { - k[i] = 2 * k[2 * i]; - tree[i] = comb(tree[2 * i], tree[2 * i + 1]); - }} - - T comb(T a, T b) {return a + b;} // Modify this + E - - void apply(int i, U val) { // And this + UF - tree[i] = val * k[i]; - if (i < n) lazy[i] = val; // Don't forget this - } - - void push_down(int i) { - if (lazy[i] != UF) { - apply(2 * i, lazy[i]); - apply(2 * i + 1, lazy[i]); - lazy[i] = UF; - }} - - void push(int i) { - for (int s = h; s > 0; s--) push_down(i >> s); - } - - void build(int i) { - while (i /= 2) { - push_down(i); - tree[i] = comb(tree[2 * i], tree[2 * i + 1]); - }} - - void update(int l, int r, U val) { - l += n, r += n; - int l0 = l, r0 = r; - push(l0), push(r0 - 1); - for (; l < r; l /= 2, r /= 2) { - if (l&1) apply(l++, val); - if (r&1) apply(--r, val); - } - build(l0), build(r0 - 1); - } - - T query(int l, int r) { - l += n, r += n; - push(l), push(r - 1); - T resL = E, resR = E; - for (; l < r; l /= 2, r /= 2) { - if (l&1) resL = comb(resL, tree[l++]); - if (r&1) resR = comb(tree[--r], resR); - } - return comb(resL, resR); - } - - // Optional: - ll lower_bound(int l, int r, T x) { - l += n, r += n; - push(l), push(r - 1); - int a[64] = {}, lp = 0, rp = 64; - for (; l < r; l /= 2, r /= 2) { - if (l&1) a[lp++] = l++; - if (r&1) a[--rp] = --r; - } - for (int i : a) if (i != 0 && tree[i] >= x) { // Modify this - while (i < n) { - push_down(i); - if (tree[2 * i] >= x) i = 2 * i; // And this - else i = 2 * i + 1; - } - return i - n; - } - return -1; - } -}; diff --git a/datastructures/lichao.cpp b/datastructures/lichao.cpp deleted file mode 100644 index f66778e..0000000 --- a/datastructures/lichao.cpp +++ /dev/null @@ -1,46 +0,0 @@ -vector xs; // IMPORTANT: Initialize before constructing! -int findX(int i) {return lower_bound(all(xs), i) - begin(xs);} - -struct Fun { // Default: Linear function. Change as needed. - ll m, c; - ll operator()(int x) {return m*xs[x] + c;} -}; - -// Default: Computes min. Change lines with comment for max. 
-struct Lichao { - static constexpr Fun id = {0, inf}; // {0, -inf} - int n, cap; - vector seg; - Lichao() : n(sz(xs)), cap(2<<__lg(n)), seg(2*cap, id) {} - - void _insert(Fun f, int l, int r, int i) { - while (i < 2*cap){ - int m = (l+r)/2; - if (m >= n) {r = m; i = 2*i; continue;} - Fun &g = seg[i]; - if (f(m) < g(m)) swap(f, g); // > - if (f(l) < g(l)) r = m, i = 2*i; // > - else l = m, i = 2*i+1; - }} - void insert(Fun f) {_insert(f, 0, cap, 1);} - - void _segmentInsert(Fun f, int l, int r, int a, int b, int i) { - if (l <= a && b <= r) _insert(f, a, b, i); - else if (a < r && l < b){ - int m = (a+b)/2; - _segmentInsert(f, l, r, a, m, 2*i); - _segmentInsert(f, l, r, m, b, 2*i+1); - }} - void segmentInsert(Fun f, ll l, ll r) { - _segmentInsert(f, findX(l), findX(r), 0, cap, 1); - } - - ll _query(int x) { - ll ans = inf; // -inf - for (int i = x + cap; i > 0; i /= 2) { - ans = min(ans, seg[i](x)); // max - } - return ans; - } - ll query(ll x) {return _query(findX(x));} -}; diff --git a/datastructures/monotonicConvexHull.cpp b/datastructures/monotonicConvexHull.cpp deleted file mode 100644 index 44bff83..0000000 --- a/datastructures/monotonicConvexHull.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Lower Envelope mit MONOTONEN Inserts und Queries. Jede neue -// Gerade hat kleinere Steigung als alle vorherigen. -struct Line { - ll m, b; - ll operator()(ll x) {return m*x+b;} -}; - -vector ls; -int ptr = 0; - -bool bad(Line l1, Line l2, Line l3) { - return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m); -} - -void add(ll m, ll b) { // Laufzeit O(1) amortisiert - while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, b})) { - ls.pop_back(); - } - ls.push_back({m, b}); - ptr = min(ptr, sz(ls) - 1); -} - -ll query(ll x) { // Laufzeit: O(1) amortisiert - ptr = min(ptr, sz(ls) - 1); - while (ptr < sz(ls)-1 && ls[ptr + 1](x) < ls[ptr](x)) ptr++; - return ls[ptr](x); -} \ No newline at end of file diff --git a/datastructures/pbds.cpp b/datastructures/pbds.cpp deleted file mode 100644 index c2b44cc..0000000 --- a/datastructures/pbds.cpp +++ /dev/null @@ -1,18 +0,0 @@ -#include -using namespace __gnu_pbds; -template -using Tree = tree, rb_tree_tag, - tree_order_statistics_node_update>; -// T.order_of_key(x): number of elements strictly less than x -// *T.find_by_order(k): k-th element - -template -struct chash { - const uint64_t C = ll(2e18 * acos(-1)) | 199; // random odd - size_t operator()(T o) const { - return __builtin_bswap64(hash()(o) * C); -}}; -template -using hashMap = gp_hash_table>; -template -using hashSet = gp_hash_table>; diff --git a/datastructures/persistent.cpp b/datastructures/persistent.cpp deleted file mode 100644 index 0a65a79..0000000 --- a/datastructures/persistent.cpp +++ /dev/null @@ -1,18 +0,0 @@ -template -struct persistent { - int& time; - vector> data; - - persistent(int& time, T value = {}) - : time(time), data(1, {time, value}) {} - - T get(int t) { - return prev(upper_bound(all(data), {t+1, {}}))->second; - } - - int set(T value) { - time += 2; - data.push_back({time, value}); - return time; - } -}; diff --git a/datastructures/persistentArray.cpp b/datastructures/persistentArray.cpp deleted file mode 100644 index 60d8b17..0000000 --- a/datastructures/persistentArray.cpp +++ /dev/null @@ -1,24 +0,0 @@ -template -struct persistentArray { - int time; - vector> data; - vector> mods; - - persistentArray(int n, T value = {}) - : time(0), data(n, {time, value}) {} - - T get(int p, int t) {return data[p].get(t);} - - int set(int p, T value) { - mods.push_back({p, time}); - 
return data[p].set(value); - } - - void reset(int t) { - while (!mods.empty() && mods.back().second > t) { - data[mods.back().first].data.pop_back(); - mods.pop_back(); - } - time = t; - } -}; diff --git a/datastructures/segmentTree.cpp b/datastructures/segmentTree.cpp deleted file mode 100644 index 79c6cae..0000000 --- a/datastructures/segmentTree.cpp +++ /dev/null @@ -1,42 +0,0 @@ -struct SegTree { - using T = ll; - int n; - vector tree; - static constexpr T E = 0; // Neutral element for combine - - SegTree(vector& a) : n(sz(a)), tree(2 * n) { - //SegTree(int size, T val = E) : n(size), tree(2 * n, val) { - copy(all(a), tree.begin() + n); - for (int i = n - 1; i > 0; i--) { // remove for range update - tree[i] = comb(tree[2 * i], tree[2 * i + 1]); - }} - - ll comb(T a, T b) {return a + b;} // modify this + neutral - - void update(int i, T val) { - tree[i += n] = val; // apply update code - while (i /= 2) tree[i] = comb(tree[2 * i], tree[2 * i + 1]); - } - - T query(int l, int r) { - T resL = E, resR = E; - for (l += n, r += n; l < r; l /= 2, r /= 2) { - if (l&1) resL = comb(resL, tree[l++]); - if (r&1) resR = comb(tree[--r], resR); - } - return comb(resL, resR); - } - - // OR: range update + point query, needs commutative comb - void modify(int l, int r, T val) { - for (l += n, r += n; l < r; l /= 2, r /= 2) { - if (l&1) tree[l] = comb(tree[l], val), l++; - if (r&1) --r, tree[r] = comb(tree[r], val); - }} - - T query(int i) { - T res = E; - for (i += n; i > 0; i /= 2) res = comb(res, tree[i]); - return res; - } -}; diff --git a/datastructures/sparseTable.cpp b/datastructures/sparseTable.cpp deleted file mode 100644 index 63cce48..0000000 --- a/datastructures/sparseTable.cpp +++ /dev/null @@ -1,23 +0,0 @@ -struct SparseTable { - vector> st; - ll *a; - - int better(int lidx, int ridx) { - return a[lidx] <= a[ridx] ? 
lidx : ridx; - } - - void init(vector *vec) { - int n = sz(*vec); - a = vec->data(); - st.assign(__lg(n) + 1, vector(n)); - iota(all(st[0]), 0); - for (int j = 0; (2 << j) <= n; j++) { - for (int i = 0; i + (2 << j) <= n; i++) { - st[j + 1][i] = better(st[j][i] , st[j][i + (1 << j)]); - }}} - - int queryIdempotent(int l, int r) { - int j = __lg(r - l); //31 - builtin_clz(r - l); - return better(st[j][l] , st[j][r - (1 << j)]); - } -}; diff --git a/datastructures/sparseTableDisjoint.cpp b/datastructures/sparseTableDisjoint.cpp deleted file mode 100644 index 31e9025..0000000 --- a/datastructures/sparseTableDisjoint.cpp +++ /dev/null @@ -1,26 +0,0 @@ -struct DisjointST { - static constexpr ll neutral = 0 - vector> dst; - ll* a; - - ll combine(const ll& x, const ll& y) { - return x + y; - } - - void init(vector *vec) { - int n = sz(*vec); - a = vec->data(); - dst.assign(__lg(n) + 1, vector(n + 1, neutral)); - for (int h = 0, l = 1; l <= n; h++, l *= 2) { - for (int c = l; c < n + l; c += 2 * l) { - for (int i = c; i < min(n, c + l); i++) - dst[h][i + 1] = combine(dst[h][i], vec->at(i)); - for (int i = min(n, c); i > c - l; i--) - dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]); - }}} - - ll query(int l, int r) { - int h = __lg(l ^ r); - return combine(dst[h][l], dst[h][r]); - } -}; diff --git a/datastructures/stlHashMap.cpp b/datastructures/stlHashMap.cpp deleted file mode 100644 index b107dde..0000000 --- a/datastructures/stlHashMap.cpp +++ /dev/null @@ -1,17 +0,0 @@ -#include -using namespace __gnu_pbds; - -template -struct betterHash { - size_t operator()(T o) const { - size_t h = hash()(o) ^ 42394245; //random value - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h) * 0x45d9f3b; - h = ((h >> 16) ^ h); - return h; -}}; - -template> -using hashMap = gp_hash_table; -template> -using hashSet = gp_hash_table; diff --git a/datastructures/stlPQ.cpp b/datastructures/stlPQ.cpp deleted file mode 100644 index 4e439f8..0000000 --- a/datastructures/stlPQ.cpp +++ /dev/null @@ -1,15 +0,0 @@ -#include -template -// greater für Min-Queue -using priorityQueue = __gnu_pbds::priority_queue>; - -int main() { - priorityQueue pq; - auto it = pq.push(5); // O(1) - pq.push(7); - pq.pop(); // O(log n) amortisiert - pq.modify(it, 6); // O(log n) amortisiert - pq.erase(it); // O(log n) amortisiert - priorityQueue pq2; - pq.join(pq2); // O(1) -} diff --git a/datastructures/stlPriorityQueue.cpp b/datastructures/stlPriorityQueue.cpp deleted file mode 100644 index 32b2455..0000000 --- a/datastructures/stlPriorityQueue.cpp +++ /dev/null @@ -1,8 +0,0 @@ -#include -template -using pQueue = __gnu_pbds::priority_queue; //> - -auto it = pq.push(5); -pq.modify(it, 6); -pq.join(pq2); -// push, join are O(1), pop, modify, erase O(log n) amortized diff --git a/datastructures/stlRope.cpp b/datastructures/stlRope.cpp deleted file mode 100644 index 804cd67..0000000 --- a/datastructures/stlRope.cpp +++ /dev/null @@ -1,8 +0,0 @@ -#include -using namespace __gnu_cxx; -rope v; // Wie normaler Container. 
-v.push_back(num); // O(log(n)) -rope sub = v.substr(start, length); // O(log(n)) -v.erase(start, length); // O(log(n)) -v.insert(v.mutable_begin() + offset, sub); // O(log(n)) -for(auto it = v.mutable_begin(); it != v.mutable_end(); it++) diff --git a/datastructures/stlTree.cpp b/datastructures/stlTree.cpp deleted file mode 100644 index fbb68b9..0000000 --- a/datastructures/stlTree.cpp +++ /dev/null @@ -1,13 +0,0 @@ -#include -#include -using namespace std; using namespace __gnu_pbds; -template -using Tree = tree, rb_tree_tag, - tree_order_statistics_node_update>; - -int main() { - Tree X; - for (int i : {1, 2, 4, 8, 16}) X.insert(i); - *X.find_by_order(3); // => 8 - X.order_of_key(10); // => 4 = min i, mit X[i] >= 10 -} diff --git a/datastructures/treap.cpp b/datastructures/treap.cpp deleted file mode 100644 index c96e36a..0000000 --- a/datastructures/treap.cpp +++ /dev/null @@ -1,79 +0,0 @@ -struct node { - int key, prio, left, right, size; - node(int key, int prio) : key(key), prio(prio), left(-1), - right(-1), size(1) {}; -}; - -vector treap; - -int getSize(int root) { - return root < 0 ? 0 : treap[root].size; -} - -void update(int root) { - if (root < 0) return; - treap[root].size = 1 + getSize(treap[root].left) - + getSize(treap[root].right); -} - -pair split(int root, int minKeyRight) { - if (root < 0) return {-1, -1}; - if (treap[root].key >= minKeyRight) { - auto leftSplit = split(treap[root].left, minKeyRight); - treap[root].left = leftSplit.second; - update(root); - leftSplit.second = root; - return leftSplit; - } else { - auto rightSplit = split(treap[root].right, minKeyRight); - treap[root].right = rightSplit.first; - update(root); - rightSplit.first = root; - return rightSplit; -}} - -int merge (int left, int right) { - if (left < 0) return right; - if (right < 0) return left; - if (treap[left].prio < treap[right].prio) { //min priority heap - treap[left].right = merge(treap[left].right, right); - update(left); - return left; - } else { - treap[right].left = merge(left, treap[right].left); - update(right); - return right; -}} - -//insert values with high priority first -int insert(int root, int key, int prio) { - int next = sz(treap); - treap.emplace_back(key, prio); - auto t = split(root, key); - //returns new root - return merge(merge(t.first, next), t.second); -} - -int remove(int root, int key) { - if (root < 0) return -1; - if (key < treap[root].key) { - treap[root].left = remove(treap[root].left, key); - update(root); - return root; - } else if (key > treap[root].key) { - treap[root].right = remove(treap[root].right, key); - update(root); - return root; - } else { //check prio? - return merge(treap[root].left, treap[root].right); -}} - -int kth(int root, int k) { - if (root < 0) return -1; - int leftSize = getSize(treap[root].left); - if (k < leftSize) return kth(treap[root].left, k); - else if (k > leftSize) { - return kth(treap[root].right, k - 1 - leftSize); - } - return root; -} diff --git a/datastructures/treap2.cpp b/datastructures/treap2.cpp deleted file mode 100644 index 10168ca..0000000 --- a/datastructures/treap2.cpp +++ /dev/null @@ -1,79 +0,0 @@ -mt19937 rng(0xc4bd5dad); -struct Treap { - struct Node { - ll val; - int prio, size = 1, l = -1, r = -1; - Node (ll x) : val(x), prio(rng()) {} - }; - - vector treap; - int root = -1; - - int getSize(int v) { - return v < 0 ? 
0 : treap[v].size; - } - - void upd(int v) { - if (v < 0) return; - auto *V = &treap[v]; - V->size = 1 + getSize(V->l) + getSize(V->r); - // Update Node Code - } - - void push(int v) { - if (v < 0) return; - //auto *V = &treap[v]; - //if (V->lazy) { - // Lazy Propagation Code - // if (V->l >= 0) treap[V->l].lazy = true; - // if (V->r >= 0) treap[V->r].lazy = true; - // V->lazy = false; - //} - } - - pair split(int v, int k) { - if (v < 0) return {-1, -1}; - auto *V = &treap[v]; - push(v); - if (getSize(V->l) >= k) { // "V->val >= k" for lower_bound(k) - auto [left, right] = split(V->l, k); - V->l = right; - upd(v); - return {left, v}; - } else { - // and only "k" - auto [left, right] = split(V->r, k - getSize(V->l) - 1); - V->r = left; - upd(v); - return {v, right}; - }} - - int merge(int left, int right) { - if (left < 0) return right; - if (right < 0) return left; - if (treap[left].prio < treap[right].prio) { - push(left); - treap[left].r = merge(treap[left].r, right); - upd(left); - return left; - } else { - push(right); - treap[right].l = merge(left, treap[right].l); - upd(right); - return right; - }} - - void insert(int i, ll val) { // and i = val - auto [left, right] = split(root, i); - treap.emplace_back(val); - left = merge(left, sz(treap) - 1); - root = merge(left, right); - } - - void remove(int i, int count = 1) { - auto [left, t_right] = split(root, i); - auto [middle, right] = split(t_right, count); - root = merge(left, right); - } - // for query use remove and read middle BEFORE remerging -}; diff --git a/datastructures/unionFind.cpp b/datastructures/unionFind.cpp deleted file mode 100644 index 68eef86..0000000 --- a/datastructures/unionFind.cpp +++ /dev/null @@ -1,26 +0,0 @@ -// unions[i] >= 0 => unions[i] = parent -// unions[i] < 0 => unions[i] = -size -vector unions; - -void init(int n) { //Initialisieren - unions.assign(n, -1); -} - -int findSet(int n) { // Pfadkompression - if (unions[n] < 0) return n; - return unions[n] = findSet(unions[n]); -} - -void linkSets(int a, int b) { // Union by size. - if (unions[b] > unions[a]) swap(a, b); - unions[b] += unions[a]; - unions[a] = b; -} - -void unionSets(int a, int b) { // Diese Funktion aufrufen. - if (findSet(a) != findSet(b)) linkSets(findSet(a), findSet(b)); -} - -int size(int a) { - return -unions[findSet(a)]; -} diff --git a/datastructures/unionFind2.cpp b/datastructures/unionFind2.cpp deleted file mode 100644 index 5362c4d..0000000 --- a/datastructures/unionFind2.cpp +++ /dev/null @@ -1,25 +0,0 @@ -vector uf; - -init(int N) { - uf.assign(N,-1); //-1 indicates that every subset has size 1 -} - -int findSet(int i) { - if(uf[i] < 0) return i; //If uf[i] < 0 we have reach a root - uf[i] = findSet(uf[i]); //Path-Compression - return uf[i]; -} - -void linkSets(int i, int j) { - //Take |uf[i]|, where i must be a root, to get the size - //of the subset - if(abs(uf[i]) < abs(uf[j])) { //Union-by-size. 
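    // editor's comment: roots store -size, so abs(uf[i]) < abs(uf[j]) means
    // i's set is the smaller one; fold its size into j and point i at j.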
- uf[j] += uf[i]; uf[i] = j; - } else { - uf[i] += uf[j]; uf[j] = i; - } -} - -void unionSets(int i, int j) { - if(findSet(i) != findSet(j)) linkSets(findSet(i),findSet(j)); -} diff --git a/datastructures/waveletTree.cpp b/datastructures/waveletTree.cpp deleted file mode 100644 index 476658e..0000000 --- a/datastructures/waveletTree.cpp +++ /dev/null @@ -1,40 +0,0 @@ -struct WaveletTree { - using it = vector::iterator; - WaveletTree *ln = nullptr, *rn = nullptr; - vector b = {0}; - ll lo, hi; - - WaveletTree(vector in) : WaveletTree(all(in)) {} - - WaveletTree(it from, it to) : // call above one - lo(*min_element(from, to)), hi(*max_element(from, to) + 1) { - ll mid = (lo + hi) / 2; - auto f = [&](ll x) {return x < mid;}; - for (it c = from; c != to; c++) { - b.push_back(b.back() + f(*c)); - } - if (lo + 1 >= hi) return; - it pivot = stable_partition(from, to, f); - ln = new WaveletTree(from, pivot); - rn = new WaveletTree(pivot, to); - } - - // kth element in sort[l, r) all 0-indexed - ll kth(int l, int r, int k) { - if (l >= r || k >= r - l) return -1; - if (lo + 1 >= hi) return lo; - int inLeft = b[r] - b[l]; - if (k < inLeft) return ln->kth(b[l], b[r], k); - else return rn->kth(l-b[l], r-b[r], k-inLeft); - } - - // count elements in[l, r) smaller than k - int countSmaller(int l, int r, ll k) { - if (l >= r || k <= lo) return 0; - if (hi <= k) return r - l; - return ln->countSmaller(b[l], b[r], k) + - rn->countSmaller(l-b[l], r-b[r], k); - } - - ~WaveletTree() {delete ln; delete rn;} -}; diff --git a/geometry/antipodalPoints.cpp b/geometry/antipodalPoints.cpp deleted file mode 100644 index 110cc74..0000000 --- a/geometry/antipodalPoints.cpp +++ /dev/null @@ -1,12 +0,0 @@ -vector> antipodalPoints(vector& h) { - if (sz(h) < 2) return {}; - vector> result; - for (int i = 0, j = 1; i < j; i++) { - while (true) { - result.push_back({i, j}); - if (cross(h[(i + 1) % sz(h)] - h[i], - h[(j + 1) % sz(h)] - h[j]) <= 0) break; - j = (j + 1) % sz(h); - }} - return result; -} diff --git a/geometry/circle.cpp b/geometry/circle.cpp deleted file mode 100644 index 8ebc800..0000000 --- a/geometry/circle.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// berechnet die Schnittpunkte von zwei kreisen -// (Kreise dürfen nicht gleich sein!) -vector circleIntersection(pt c1, double r1, - pt c2, double r2) { - double d = abs(c1 - c2); - if (d < abs(r1 - r2) || d > abs(r1 + r2)) return {}; - double a = (r1 * r1 - r2 * r2 + d * d) / (2 * d); - pt p = (c2 - c1) * a / d + c1; - if (d == abs(r1 - r2) || d == abs(r1 + r2)) return {p}; - double h = sqrt(r1 * r1 - a * a); - return {p + pt{0, 1} * (c2 - c1) * h / d, - p - pt{0, 1} * (c2 - c1) * h / d}; -} - -// berechnet die Schnittpunkte zwischen -// einem Kreis(Kugel) und einer Grade 2d und 3d -vector circleRayIntersection(pt center, double r, - pt orig, pt dir) { - vector result; - double a = dot(dir, dir); - double b = 2 * dot(dir, orig - center); - double c = dot(orig - center, orig - center) - r * r; - double discr = b * b - 4 * a * c; - if (discr >= 0) { - //t in [0, 1] => schnitt mit segment [orig, orig + dir] - double t1 = -(b + sqrt(discr)) / (2 * a); - double t2 = -(b - sqrt(discr)) / (2 * a); - if (t1 >= 0) result.push_back(t1 * dir + orig); - if (t2 >= 0 && abs(t1 - t2) > EPS) { - result.push_back(t2 * dir + orig); - }} - return result; -} diff --git a/geometry/closestPair.cpp b/geometry/closestPair.cpp deleted file mode 100644 index a2c91b3..0000000 --- a/geometry/closestPair.cpp +++ /dev/null @@ -1,38 +0,0 @@ -bool compY(pt a, pt b) { - return (imag(a) == imag(b)) ? 
real(a) < real(b) - : imag(a) < imag(b); -} - -bool compX(pt a, pt b) { - return (real(a) == real(b)) ? imag(a) < imag(b) - : real(a) < real(b); -} - -double shortestDist(vector& pts) { // sz(pts) > 1 - set status(compY); - sort(all(pts), compX); - double opt = 1.0/0.0, sqrtOpt = 1.0/0.0; - auto left = pts.begin(), right = pts.begin(); - status.insert(*right); right++; - - while (right != pts.end()) { - if (left != right && - abs(real(*left - *right)) >= sqrtOpt) { - status.erase(*left); - left++; - } else { - auto lower = status.lower_bound({-1.0/0.0, //-INF - imag(*right) - sqrtOpt}); - auto upper = status.upper_bound({-1.0/0.0, //-INF - imag(*right) + sqrtOpt}); - for (;lower != upper; lower++) { - double cand = norm(*right - *lower); - if (cand < opt) { - opt = cand; - sqrtOpt = sqrt(opt); - }} - status.insert(*right); - right++; - }} - return sqrtOpt; -} diff --git a/geometry/convexHull.cpp b/geometry/convexHull.cpp deleted file mode 100644 index b1de170..0000000 --- a/geometry/convexHull.cpp +++ /dev/null @@ -1,19 +0,0 @@ -vector convexHull(vector pts){ - sort(all(pts), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - pts.erase(unique(all(pts)), pts.end()); - int k = 0; - vector h(2 * sz(pts)); - for (int i = 0; i < sz(pts); i++) {// Untere Hülle. - while (k > 1 && cross(h[k-2], h[k-1], pts[i]) <= 0) k--; - h[k++] = pts[i]; - } - for (int i = sz(pts)-2, t = k; i >= 0; i--) {// Obere Hülle. - while (k > t && cross(h[k-2], h[k-1], pts[i]) <= 0) k--; - h[k++] = pts[i]; - } - h.resize(k); - return h; -} diff --git a/geometry/delaunay.cpp b/geometry/delaunay.cpp deleted file mode 100644 index 1008b39..0000000 --- a/geometry/delaunay.cpp +++ /dev/null @@ -1,124 +0,0 @@ -using lll = __int128; -using pt = complex; - -constexpr pt INF_PT = pt(1e18, 1e18); - -bool circ(pt p, pt a, pt b, pt c) {// p in circle(A,B,C) - return imag((c-b)*conj(p-c)*(a-p)*conj(b-a)) < 0; -} - -struct QuadEdge { - QuadEdge* rot = nullptr; - QuadEdge* onext = nullptr; - pt orig = INF_PT; - bool used = false; - QuadEdge* rev() const {return rot->rot;} - QuadEdge* lnext() const {return rot->rev()->onext->rot;} - QuadEdge* oprev() const {return rot->onext->rot;} - pt dest() const {return rev()->orig;} -}; - -deque edgeData; - -QuadEdge* makeEdge(pt from, pt to) { - for (int j : {0,1,2,3}) edgeData.push_back({}); - auto e = edgeData.end() - 4; - for (int j : {0,1,2,3}) e[j].onext = e[j^3].rot = &e[j^(j>>1)]; - e[0].orig = from; - e[1].orig = to; - return &e[0]; -} - -void splice(QuadEdge* a, QuadEdge* b) { - swap(a->onext->rot->onext, b->onext->rot->onext); - swap(a->onext, b->onext); -} - -QuadEdge* connect(QuadEdge* a, QuadEdge* b) { - QuadEdge* e = makeEdge(a->dest(), b->orig); - splice(e, a->lnext()); - splice(e->rev(), b); - return e; -} - -bool valid(QuadEdge* e, QuadEdge* base) { - return cross(e->dest(), base->orig, base->dest()) < 0; -} - -template -QuadEdge* deleteAll(QuadEdge* e, QuadEdge* base) { - if (valid(e, base)) { - while (circ(base->dest(), base->orig, e->dest(), (ccw ? e->onext : e->oprev())->dest())) { - QuadEdge* t = ccw ? 
e->onext : e->oprev(); - splice(e, e->oprev()); - splice(e->rev(), e->rev()->oprev()); - e = t; - }} - return e; -} - -template -pair rec(IT l, IT r) { - int n = distance(l, r); - if (n <= 3) { - QuadEdge* a = makeEdge(l[0], l[1]); - if (n == 2) return {a, a->rev()}; - QuadEdge* b = makeEdge(l[1], l[2]); - splice(a->rev(), b); - int side = cross(l[0], l[1], l[2]); - QuadEdge* c = nullptr; - if (side != 0) c = connect(b, a); - if (side >= 0) return {a, b->rev()}; - else return {c->rev(), c}; - } - auto m = l + (n / 2); - auto [ldo, ldi] = rec(l, m); - auto [rdi, rdo] = rec(m, r); - while (true) { - if (cross(rdi->orig, ldi->orig, ldi->dest()) > 0) { - ldi = ldi->lnext(); - } else if (cross(ldi->orig, rdi->orig, rdi->dest()) < 0) { - rdi = rdi->rev()->onext; - } else break; - } - QuadEdge* base = connect(rdi->rev(), ldi); - if (ldi->orig == ldo->orig) ldo = base->rev(); - if (rdi->orig == rdo->orig) rdo = base; - while (true) { - QuadEdge* lcand = deleteAll(base->rev()->onext, base); - QuadEdge* rcand = deleteAll(base->oprev(), base); - if (!valid(lcand, base) && !valid(rcand, base)) break; - if (!valid(lcand, base) || (valid(rcand, base) && - circ(lcand->dest(), lcand->orig, rcand->orig, rcand->dest()))) { - base = connect(rcand, base->rev()); - } else { - base = connect(base->rev(), lcand->rev()); - }} - return {ldo, rdo}; -} - -vector delaunay(vector pts) { - if (sz(pts) <= 2) return {}; - sort(all(pts), [](const pt& a, const pt& b) { - if (real(a) != real(b)) return real(a) < real(b); - return imag(a) < imag(b); - }); - QuadEdge* r = rec(all(pts)).first; - vector edges = {r}; - while (cross(r->onext->dest(), r->dest(), r->orig) < 0) r = r->onext; - auto add = [&](QuadEdge* e){ - QuadEdge* cur = e; - do { - cur->used = true; - pts.push_back(cur->orig); - edges.push_back(cur->rev()); - cur = cur->lnext(); - } while (cur != e); - }; - add(r); - pts.clear(); - for (int i = 0; i < sz(edges); i++) { - if (!edges[i]->used) add(edges[i]); - } - return pts; -} diff --git a/geometry/formulars.cpp b/geometry/formulars.cpp deleted file mode 100644 index 22e9e32..0000000 --- a/geometry/formulars.cpp +++ /dev/null @@ -1,42 +0,0 @@ -// Komplexe Zahlen als Punkte. Wenn immer möglich complex -// verwenden. Funktionen wie abs() geben dann aber ll zurück. -using pt = complex; - -constexpr double PIU = acos(-1.0l); // PIL < PI < PIU -constexpr double PIL = PIU-2e-19l; - -// Winkel zwischen Punkt und x-Achse in [-PI, PI]. -double angle(pt a) {return arg(a);} - -// rotiert Punkt im Uhrzeigersinn um den Ursprung. -pt rotate(pt a, double theta) {return a * polar(1.0, theta);} - -// Skalarprodukt. -double dot(pt a, pt b) {return real(conj(a) * b);} - -// abs()^2.(pre c++20) -double norm(pt a) {return dot(a, a);} - -// Kreuzprodukt, 0, falls kollinear. -double cross(pt a, pt b) {return imag(conj(a) * b);} -double cross(pt p, pt a, pt b) {return cross(a - p, b - p);} - -// 1 => c links von a->b -// 0 => a, b und c kolliniear -// -1 => c rechts von a->b -int orientation(pt a, pt b, pt c) { - double orien = cross(b - a, c - a); - return (orien > EPS) - (orien < -EPS); -} - -// Liegt d in der gleichen Ebene wie a, b, und c? 
-bool isCoplanar(pt a, pt b, pt c, pt d) { - return abs((b - a) * (c - a) * (d - a)) < EPS; -} - -// charakterisiert winkel zwischen Vektoren u und v -pt uniqueAngle(pt u, pt v) { - pt tmp = v * conj(u); - ll g = abs(gcd(real(tmp), imag(tmp))); - return tmp / g; -} diff --git a/geometry/formulars3d.cpp b/geometry/formulars3d.cpp deleted file mode 100644 index 84e17c0..0000000 --- a/geometry/formulars3d.cpp +++ /dev/null @@ -1,53 +0,0 @@ -// Skalarprodukt -double operator|(pt3 a, pt3 b) { - return a.x * b.x + a.y*b.y + a.z*b.z; -} -double dot(pt3 a, pt3 b) {return a|b;} - -// Kreuzprodukt -pt3 operator*(pt3 a, pt3 b) {return {a.y*b.z - a.z*b.y, - a.z*b.x - a.x*b.z, - a.x*b.y - a.y*b.x};} -pt3 cross(pt3 a, pt3 b) {return a*b;} - -// Länge von a -double abs(pt3 a) {return sqrt(dot(a, a));} -double abs(pt3 a, pt3 b) {return abs(b - a);} - -// Mixedprodukt -double mixed(pt3 a, pt3 b, pt3 c) {return a*b|c;}; - -// orientierung von p zu der Ebene durch a, b, c -// -1 => gegen den Uhrzeigersinn, -// 0 => kolliniear, -// 1 => im Uhrzeigersinn. -int orientation(pt3 a, pt3 b, pt3 c, pt3 p) { - double orien = mixed(b - a, c - a, p - a); - return (orien > EPS) - (orien < -EPS); -} - -// Entfernung von Punkt p zur Ebene a,b,c. -double distToPlane(pt3 a, pt3 b, pt3 c, pt3 p) { - pt3 n = cross(b-a, c-a); - return (abs(dot(n, p)) - dot(n, a)) / abs(n); -} - -// Liegt p in der Ebene a,b,c? -bool pointOnPlane(pt3 a, pt3 b, pt3 c, pt3 p) { - return orientation(a, b, c, p) == 0; -} - -// Schnittpunkt von der Grade a-b und der Ebene c,d,e -// die Grade darf nicht parallel zu der Ebene sein! -pt3 linePlaneIntersection(pt3 a, pt3 b, pt3 c, pt3 d, pt3 e) { - pt3 n = cross(d-c, e-c); - pt3 d = b - a; - return a - d * (dot(n, a) - dot(n, c)) / dot(n, d); -} - -// Abstand zwischen der Grade a-b und c-d -double lineLineDist(pt3 a, pt3 b, pt3 c, pt3 d) { - pt3 n = cross(b - a, d - c); - if (abs(n) < EPS) return distToLine(a, b, c); - return abs(dot(a - c, n)) / abs(n); -} diff --git a/geometry/geometry.tex b/geometry/geometry.tex deleted file mode 100644 index 95c0adb..0000000 --- a/geometry/geometry.tex +++ /dev/null @@ -1,62 +0,0 @@ -\section{Geometrie} - -\begin{algorithm}{Closest Pair} - \begin{methods} - \method{shortestDist}{kürzester Abstand zwischen Punkten}{n\*\log(n)} - \end{methods} - \sourcecode{geometry/closestPair.cpp} -\end{algorithm} - -\begin{algorithm}{Rotating calipers} - \begin{methods} - \method{antipodalPoints}{berechnet antipodale Punkte}{n} - \end{methods} - \textbf{WICHTIG:} Punkte müssen gegen den Uhrzeigersinn sortiert sein und konvexes Polygon bilden! 
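  Typical use (editor's note): the diameter of a convex polygon equals $\max \abs{h_i - h_j}$ over the antipodal pairs $(i,j)$ returned by \code{antipodalPoints}.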
- \sourcecode{geometry/antipodalPoints.cpp} -\end{algorithm} - -\begin{algorithm}{Konvexehülle} - \begin{methods} - \method{convexHull}{berechnet konvexe Hülle}{n\*\log(n)} - \end{methods} - \begin{itemize} - \item Konvexe Hülle gegen den Uhrzeigersinn sortiert - \item nur Eckpunkte enthalten(für alle Punkte = im CCW Test entfernen) - \item erster und letzter Punkt sind identisch - \end{itemize} - \sourcecode{geometry/convexHull.cpp} -\end{algorithm} - -\subsection{Formeln~~--~\texttt{std::complex}} -\sourcecode{geometry/formulars.cpp} -\sourcecode{geometry/linesAndSegments.cpp} -\sourcecode{geometry/sortAround.cpp} -\input{geometry/triangle} -\sourcecode{geometry/triangle.cpp} -\sourcecode{geometry/polygon.cpp} -\sourcecode{geometry/circle.cpp} - -\subsection{Formeln - 3D} -\sourcecode{geometry/formulars3d.cpp} - -\optional{ - \subsection{3D-Kugeln} - \sourcecode{geometry/spheres.cpp} -} - -\begin{algorithm}{Half-plane intersection} - \sourcecode{geometry/hpi.cpp} -\end{algorithm} - -\begin{algorithm}[optional]{Delaunay Triangulierung} - \begin{methods} - \method{delaunay}{berechnet Triangulierung}{n\*\log(n)} - \end{methods} - \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Traingulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig. - \sourcecode{geometry/delaunay.cpp} -\end{algorithm} - -\optional{ -\subsection{Geraden} -\sourcecode{geometry/lines.cpp} -} diff --git a/geometry/hpi.cpp b/geometry/hpi.cpp deleted file mode 100644 index 3509e0e..0000000 --- a/geometry/hpi.cpp +++ /dev/null @@ -1,68 +0,0 @@ -constexpr ll inf = 0x1FFF'FFFF'FFFF'FFFF;//THIS CODE IS WIP - -bool left(pt p) {return real(p) < 0 || - (real(p) == 0 && imag(p) < 0);} -struct hp { - pt from, to; - - hp(pt a, pt b) : from(a), to(b) {} - hp(pt dummy) : hp(dummy, dummy) {} - - bool dummy() const {return from == to;} - pt dir() const {return dummy() ? 
to : to - from;} - bool operator<(const hp& o) const { - if (left(dir()) != left(o.dir())) - return left(dir()) > left(o.dir()); - return cross(dir(), o.dir()) > 0; - } - - using lll = __int128; - using ptl = complex; - ptl mul(lll m, ptl p) const {return m*p;}//ensure 128bit - - bool check(const hp& a, const hp& b) const { - if (dummy() || b.dummy()) return false; - if (a.dummy()) { - ll ort = sgn(cross(b.dir(), dir())); - if (ort == 0) return cross(from, to, a.from) < 0; - return cross(b.dir(), a.dir()) * ort > 0; - } - ll y = cross(a.dir(), b.dir()); - ll z = cross(b.from - a.from, b.dir()); - ptl i = mul(y, a.from) + mul(z, a.dir()); //intersect a and b - // check if i is outside/right of x - return imag(conj(mul(sgn(y),dir()))*(i-mul(y,from))) < 0; - } -}; - -constexpr ll lim = 2e9+7; - -deque intersect(vector hps) { - hps.push_back(hp(pt{lim+1,-1})); - hps.push_back(hp(pt{lim+1,1})); - sort(all(hps)); - - deque dq = {hp(pt{-lim, 1})}; - for (auto x : hps) { - while (sz(dq) > 1 && x.check(dq.end()[-1], dq.end()[-2])) - dq.pop_back(); - while (sz(dq) > 1 && x.check(dq[0], dq[1])) - dq.pop_front(); - - if (cross(x.dir(), dq.back().dir()) == 0) { - if (dot(x.dir(), dq.back().dir()) < 0) return {}; - if (cross(x.from, x.to, dq.back().from) < 0) - dq.pop_back(); - else continue; - } - dq.push_back(x); - } - - while (sz(dq) > 2 && dq[0].check(dq.end()[-1], dq.end()[-2])) - dq.pop_back(); - while (sz(dq) > 2 && dq.end()[-1].check(dq[0], dq[1])) - dq.pop_front(); - - if (sz(dq) < 3) return {}; - return dq; -} diff --git a/geometry/lines.cpp b/geometry/lines.cpp deleted file mode 100644 index 95536a4..0000000 --- a/geometry/lines.cpp +++ /dev/null @@ -1,33 +0,0 @@ -struct line { - double a, b, c; // ax + by + c = 0; vertikale Line: b = 0, sonst: b = 1 - line(pt p, pt q) : a(-imag(q-p)), b(real(q-p)), c(cross({b, -a},p)) {} -}; - -line pointsToLine(pt p1, pt p2) { - line l; - if (abs(real(p1 - p2)) < EPS) { - l.a = 1; l.b = 0.0; l.c = -real(p1); - } else { - l.a = -imag(p1 - p2) / real(p1 - p2); - l.b = 1.0; - l.c = -(l.a * real(p1)) - imag(p1); - } - return l; -} - -bool parallel(line l1, line l2) { - return (abs(l1.a - l2.a) < EPS) && (abs(l1.b - l2.b) < EPS); -} - -bool same(line l1, line l2) { - return parallel(l1, l2) && (abs(l1.c - l2.c) < EPS); -} - -bool intersect(line l1, line l2, pt& p) { - if (parallel(l1, l2)) return false; - double y, x = (l2.b * l1.c - l1.b * l2.c) / (l2.a * l1.b - l1.a * l2.b); - if (abs(l1.b) > EPS) y = -(l1.a * x + l1.c); - else y = -(l2.a * x + l2.c); - p = {x, y}; - return true; -} diff --git a/geometry/linesAndSegments.cpp b/geometry/linesAndSegments.cpp deleted file mode 100644 index 98fe4dc..0000000 --- a/geometry/linesAndSegments.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// Test auf Streckenschnitt zwischen a-b und c-d. -bool lineSegmentIntersection(pt a, pt b, pt c, pt d) { - if (orientation(a, b, c) == 0 && orientation(a, b, d) == 0) - return pointOnLineSegment(a,b,c) || - pointOnLineSegment(a,b,d) || - pointOnLineSegment(c,d,a) || - pointOnLineSegment(c,d,b); - return orientation(a, b, c) * orientation(a, b, d) <= 0 && - orientation(c, d, a) * orientation(c, d, b) <= 0; -} - -// Berechnet die Schnittpunkte der Strecken p0-p1 und p2-p3. -// Enthält entweder keinen Punkt, den einzigen Schnittpunkt -// oder die Endpunkte der Schnittstrecke. 
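// (editor's note: returns 0, 1 or 2 points; the 2-point case is the overlap
//  segment of collinear inputs, and touching endpoints count as intersection)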
-vector lineSegmentIntersection(pt p0, pt p1, pt p2, pt p3) { - double a = cross(p1 - p0, p3 - p2); - double b = cross(p2 - p0, p3 - p2); - double c = cross(p1 - p0, p0 - p2); - if (a < 0) {a = -a; b = -b; c = -c;} - if (b < -EPS || b-a > EPS || c < -EPS || c-a > EPS) return {}; - if (a > EPS) return {p0 + b/a*(p1 - p0)}; - vector result; - auto insertUnique = [&](pt p) { - for (auto q: result) if (abs(p - q) < EPS) return; - result.push_back(p); - }; - if (dot(p2-p0, p3-p0) < EPS) insertUnique(p0); - if (dot(p2-p1, p3-p1) < EPS) insertUnique(p1); - if (dot(p0-p2, p1-p2) < EPS) insertUnique(p2); - if (dot(p0-p3, p1-p3) < EPS) insertUnique(p3); - return result; -} - -// Entfernung von Punkt p zur Geraden durch a-b. 2d und 3d -double distToLine(pt a, pt b, pt p) { - return abs(cross(p - a, b - a)) / abs(b - a); -} - -// Projiziert p auf die Gerade a-b -pt projectToLine(pt a, pt b, pt p) { - return a + (b - a) * dot(p - a, b - a) / norm(b - a); -} - -// Liegt p auf der Geraden a-b? 2d und 3d -bool pointOnLine(pt a, pt b, pt p) { - return cross(a, b, p) == 0; -} - -// Test auf Linienschnitt zwischen a-b und c-d. -bool lineIntersection(pt a, pt b, pt c, pt d) { - return abs(cross(a - b, c - d)) < EPS; -} - -// Berechnet den Schnittpunkt der Graden p0-p1 und p2-p3. -// die Graden dürfen nicht parallel sein! -pt lineIntersection(pt p0, pt p1, pt p2, pt p3) { - double a = cross(p1 - p0, p3 - p2); - double b = cross(p2 - p0, p3 - p2); - return {p0 + b/a*(p1 - p0)}; -} - -// Liegt p auf der Strecke a-b? -bool pointOnLineSegment(pt a, pt b, pt p) { - if (cross(a, b, p) != 0) return false; - double dist = norm(a - b); - return norm(a - p) <= dist && norm(b - p) <= dist; -} - -// Entfernung von Punkt p zur Strecke a-b. -double distToSegment(pt a, pt b, pt p) { - if (a == b) return abs(p - a); - if (dot(p - a, b - a) <= 0) return abs(p - a); - if (dot(p - b, b - a) >= 0) return abs(p - b); - return distToLine(a, b, p); -} - -// Kürzeste Entfernung zwischen den Strecken a-b und c-d. -double distBetweenSegments(pt a, pt b, pt c, pt d) { - if (lineSegmentIntersection(a, b, c, d)) return 0.0; - return min({distToSegment(a, b, c), distToSegment(a, b, d), - distToSegment(c, d, a), distToSegment(c, d, b)}); -} - -// sortiert alle Punkte pts auf einer Linie entsprechend dir -void sortLine(pt dir, vector& pts) { // (2d und 3d) - sort(all(pts), [&](pt a, pt b){ - return dot(dir, a) < dot(dir, b); - }); -} diff --git a/geometry/polygon.cpp b/geometry/polygon.cpp deleted file mode 100644 index e3ce33e..0000000 --- a/geometry/polygon.cpp +++ /dev/null @@ -1,150 +0,0 @@ -// Flächeninhalt eines Polygons (nicht selbstschneidend). -// Punkte gegen den Uhrzeigersinn: positiv, sonst negativ. -double area(const vector& poly) { //poly[0] == poly.back() - double res = 0; - for (int i = 0; i + 1 < sz(poly); i++) - res += cross(poly[i], poly[i + 1]); - return 0.5 * res; -} - -// Anzahl drehungen einer Polyline um einen Punkt -// p nicht auf rand und poly[0] == poly.back() -// res != 0 or (res & 1) != 0 um inside zu prüfen bei -// selbstschneidenden Polygonen (definitions Sache) -ll windingNumber(pt p, const vector& poly) { - ll res = 0; - for (int i = 0; i + 1 < sz(poly); i++) { - pt a = poly[i], b = poly[i + 1]; - if (real(a) > real(b)) swap(a, b); - if (real(a) <= real(p) && real(p) < real(b) && - cross(p, a, b) < 0) { - res += orientation(p, poly[i], poly[i + 1]); - }} - return res; -} - -// Testet, ob ein Punkt im Polygon liegt (beliebige Polygone). 
-// Ändere Zeile 32 falls rand zählt, poly[0] == poly.back() -bool inside(pt p, const vector& poly) { - bool in = false; - for (int i = 0; i + 1 < sz(poly); i++) { - pt a = poly[i], b = poly[i + 1]; - if (pointOnLineSegment(a, b, p)) return false; - if (real(a) > real(b)) swap(a,b); - if (real(a) <= real(p) && real(p) < real(b) && - cross(p, a, b) < 0) { - in ^= 1; - }} - return in; -} - -// convex hull without duplicates, h[0] != h.back() -// apply comments if border counts as inside -bool inside(pt p, const vector& hull) { - int l = 0, r = sz(hull) - 1; - if (cross(hull[0], hull[r], p) >= 0) return false; // > 0 - while (l + 1 < r) { - int m = (l + r) / 2; - if (cross(hull[0], hull[m], p) > 0) l = m; // >= 0 - else r = m; - } - return cross(hull[l], hull[r], p) > 0; // >= 0 -} - -void rotateMin(vector& hull) { - auto mi = min_element(all(hull), [](const pt& a, const pt& b){ - return real(a) == real(b) ? imag(a) < imag(b) - : real(a) < real(b); - }); - rotate(hull.begin(), mi, hull.end()); -} - -// convex hulls without duplicates, h[0] != h.back() -vector minkowski(vector ps, vector qs) { - rotateMin(ps); - rotateMin(qs); - ps.push_back(ps[0]); - qs.push_back(qs[0]); - ps.push_back(ps[1]); - qs.push_back(qs[1]); - vector res; - for (ll i = 0, j = 0; i + 2 < sz(ps) || j + 2 < sz(qs);) { - res.push_back(ps[i] + qs[j]); - auto c = cross(ps[i + 1] - ps[i], qs[j + 1] - qs[j]); - if(c >= 0) i++; - if(c <= 0) j++; - } - return res; -} - -// convex hulls without duplicates, h[0] != h.back() -double dist(const vector& ps, const vector& qs) { - for (pt& q : qs) q *= -1; - auto p = minkowski(ps, qs); - p.push_back(p[0]); - double res = 0.0; - //bool intersect = true; - for (ll i = 0; i + 1 < sz(p); i++) { - //intersect &= cross(p[i], p[i+1] - p[i]) <= 0; - res = max(res, cross(p[i], p[i+1]-p[i]) / abs(p[i+1]-p[i])); - } - return res; -} - -bool left(pt of, pt p) {return cross(p, of) < 0 || - (cross(p, of) == 0 && dot(p, of) > 0);} - -// convex hulls without duplicates, hull[0] == hull.back() and -// hull[0] must be a convex point (with angle < pi) -// returns index of corner where dot(dir, corner) is maximized -int extremal(const vector& hull, pt dir) { - dir *= pt(0, 1); - int l = 0, r = sz(hull) - 1; - while (l + 1 < r) { - int m = (l + r) / 2; - pt dm = hull[m+1]-hull[m]; - pt dl = hull[l+1]-hull[l]; - if (left(dl, dir) != left(dl, dm)) { - if (left(dl, dm)) l = m; - else r = m; - } else { - if (cross(dir, dm) < 0) l = m; - else r = m; - }} - return r; -} - -// convex hulls without duplicates, hull[0] == hull.back() and -// hull[0] must be a convex point (with angle < pi) -// {} if no intersection -// {x} if corner is only intersection -// {a, b} segments (a,a+1) and (b,b+1) intersected (if only the -// border is intersected corners a and b are the start and end) -vector intersect(const vector& hull, pt a, pt b) { - int endA = extremal(hull, (a-b) * pt(0, 1)); - int endB = extremal(hull, (b-a) * pt(0, 1)); - // cross == 0 => line only intersects border - if (cross(hull[endA], a, b) > 0 || - cross(hull[endB], a, b) < 0) return {}; - - int n = sz(hull) - 1; - vector res; - for (auto _ : {0, 1}) { - int l = endA, r = endB; - if (r < l) r += n; - while (l + 1 < r) { - int m = (l + r) / 2; - if (cross(hull[m % n], a, b) <= 0 && - cross(hull[m % n], a, b) != hull(poly[endB], a, b)) - l = m; - else r = m; - } - if (cross(hull[r % n], a, b) == 0) l++; - res.push_back(l % n); - swap(endA, endB); - swap(a, b); - } - if (res[0] == res[1]) res.pop_back(); - return res; -} - diff --git 
a/geometry/segmentIntersection.cpp b/geometry/segmentIntersection.cpp deleted file mode 100644 index 6dc5dc5..0000000 --- a/geometry/segmentIntersection.cpp +++ /dev/null @@ -1,63 +0,0 @@ -struct seg { - pt a, b; - int id; - bool operator<(const seg& o) const { - if (real(a) < real(o.a)) { - int s = orientation(a, b, o.a); - return (s > 0 || (s == 0 && imag(a) < imag(o.a))); - } else if (real(a) > real(o.a)) { - int s = orientation(o.a, o.b, a); - return (s < 0 || (s == 0 && imag(a) < imag(o.a))); - } - return imag(a) < imag(o.a); - } -}; - -struct event { - pt p; - int id, type; - bool operator<(const event& o) const { - if (real(p) != real(o.p)) return real(p) < real(o.p); - if (type != o.type) return type > o.type; - return imag(p) < imag(o.p); - } -}; - -bool lessPT(const pt& a, const pt& b) { - return real(a) != real(b) ? real(a) < real(b) - : imag(a) < imag(b); -} - -bool intersect(const seg& a, const seg& b) { - return lineSegmentIntersection(a.a, a.b, b.a, b.b); -} - -pair intersect(vector& segs) { - vector events; - for (seg& s : segs) { - if (lessPT(s.b, s.a)) swap(s.b, s.a); - events.push_back({s.a, s.id, 1}); - events.push_back({s.b, s.id, -1}); - } - sort(all(events)); - - set q; - vector::iterator> where(sz(segs)); - for (auto e : events) { - int id = e.id; - if (e.type > 0) { - auto it = q.lower_bound(segs[id]); - if (it != q.end() && intersect(*it, segs[id])) - return {it->id, segs[id].id}; - if (it != q.begin() && intersect(*prev(it), segs[id])) - return {prev(it)->id, segs[id].id}; - where[id] = q.insert(it, segs[id]); - } else { - auto it = where[id]; - if (it != q.begin() && next(it) != q.end() && intersect(*next(it), *prev(it))) - return {next(it)->id, prev(it)->id}; - q.erase(it); - } - } - return {-1, -1}; -} diff --git a/geometry/sortAround.cpp b/geometry/sortAround.cpp deleted file mode 100644 index 86fead7..0000000 --- a/geometry/sortAround.cpp +++ /dev/null @@ -1,10 +0,0 @@ -bool left(pt p) {return real(p) < 0 || - (real(p) == 0 && imag(p) < 0);} - -void sortAround(pt p, vector& ps) { - sort(all(ps), [&](const pt& a, const pt& b){ - if (left(a - p) != left(b - p)) - return left(a - p) > left(b - p); - return cross(p, a, b) > 0; - }); -} diff --git a/geometry/spheres.cpp b/geometry/spheres.cpp deleted file mode 100644 index abffde5..0000000 --- a/geometry/spheres.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Great Cirlce Distance mit Längen- und Breitengrad. -double gcDist(double pLat, double pLon, - double qLat, double qLon, double radius) { - pLat *= PI / 180; pLon *= PI / 180; - qLat *= PI / 180; qLon *= PI / 180; - return radius * acos(cos(pLat) * cos(pLon) * - cos(qLat) * cos(qLon) + - cos(pLat) * sin(pLon) * - cos(qLat) * sin(qLon) + - sin(pLat) * sin(qLat)); -} - -// Great Cirlce Distance mit kartesischen Koordinaten. -double gcDist(point p, point q) { - return acos(p.x * q.x + p.y * q.y + p.z * q.z); -} - -// 3D Punkt in kartesischen Koordinaten. -struct point{ - double x, y, z; - point() {} - point(double x, double y, double z) : x(x), y(y), z(z) {} - point(double lat, double lon) { - lat *= PI / 180.0; lon *= PI / 180.0; - x = cos(lat) * sin(lon); - y = cos(lat) * cos(lon); - z = sin(lat); - } -}; diff --git a/geometry/triangle.cpp b/geometry/triangle.cpp deleted file mode 100644 index 33a8394..0000000 --- a/geometry/triangle.cpp +++ /dev/null @@ -1,43 +0,0 @@ -// Mittelpunkt des Dreiecks abc. -pt centroid(pt a, pt b, pt c) {return (a + b + c) / 3.0;} - -// Flächeninhalt eines Dreicks bei bekannten Eckpunkten. 
-double area(pt a, pt b, pt c) { - return abs(cross(b - a, c - a)) / 2.0; -} - -// Flächeninhalt eines Dreiecks bei bekannten Seitenlängen. -double area(double a, double b, double c) { - double s = (a + b + c) / 2.0; - return sqrt(s * (s-a) * (s-b) * (s-c)); -} - -// Zentrum des größten Kreises im Dreiecke -pt inCenter(pt a, pt b, pt c) { - double x = abs(a-b), y = abs(b-c), z = abs(a-c); - return (y*a + z*b + x*c) / (x+y+z); -} - -// Zentrum des Kreises durch alle Eckpunkte -// a, b und c nicht kollinear -pt circumCenter(pt a, pt b, pt c) { - b -= a, c -= a; - pt d = b * norm(c) - c * norm(b); - d = {-d.imag(), d.real()}; - return a + d / cross(b, c) / 2.0; -} - -// 1 => p außerhalb Kreis durch a,b,c -// 0 => p auf Kreis durch a,b,c -// -1 => p im Kreis durch a,b,c -int insideOutCenter(pt a, pt b, pt c, pt p) {// braucht lll - return sgn(imag((c-b)*conj(p-c)*(a-p)*conj(b-a))); -} - -// Sind die Dreiecke a1, b1, c1, and a2, b2, c2 ähnlich? -// Erste Zeile testet Ähnlichkeit mit gleicher Orientierung, -// zweite Zeile testet Ähnlichkeit mit verschiedener Orientierung -bool similar(pt a1, pt b1, pt c1, pt a2, pt b2, pt c2) { - return (b2-a2) * (c1-a1) == (b1-a1) * (c2-a2) || - (b2-a2) * conj(c1-a1) == conj(b1-a1) * (c2-a2); -} diff --git a/geometry/triangle.tex b/geometry/triangle.tex deleted file mode 100644 index 3decd54..0000000 --- a/geometry/triangle.tex +++ /dev/null @@ -1,41 +0,0 @@ - -\begin{minipage}[T]{0.27\linewidth} - Generell: - \begin{itemize} - \item $\cos(\gamma)=\frac{a^2+b^2-c^2}{2ab}$ - \item $b=\frac{a}{\sin(\alpha)}\sin(\beta)$ - %\item $b=\frac{a}{\sin(\pi-\beta-\gamma)}\sin(\beta)$ - %\item $\sin(\beta)=\frac{b\sin(\alpha)}{a}$ %asin is not uniquely invertible - \item $\Delta=\frac{bc}{2}\sin(\alpha)$ - \end{itemize} -\end{minipage} -\hfill -\begin{minipage}[B]{0.5\linewidth} - \centering - \begin{tikzpicture}[line cap=round,minimum size=0,x=.7cm,y=0.7cm] - \node[circle,inner sep=0] (AA) at (0,0) {$A$}; - \node[circle,inner sep=0] (BB) at (3,-1) {$B$}; - \node[circle,inner sep=0] (CC) at (3.666667,1) {$C$}; - - \coordinate (A) at (AA.0); - \coordinate (B) at (BB.100); - \coordinate (C) at (CC.210); - - \pic[draw,angle radius=15,pic text=$\gamma$]{angle = A--C--B}; - \pic[draw,angle radius=15,pic text=$\beta$]{angle = C--B--A}; - \pic[draw,angle radius=20,pic text=$\alpha$]{angle = B--A--C}; - - \draw (A) to[edge label={$b$},inner sep=1] (C); - \draw (A) to[edge label'={$c$},inner sep=1.3] (B); - \draw (B) to[edge label'={$a$},inner sep=0.6] (C); - \end{tikzpicture} -\end{minipage} -\hfill -\begin{minipage}[T]{0.16\linewidth} - $\beta=90^\circ$: - \begin{itemize} - \item $\sin(\alpha)=\frac{a}{b}$ - \item $\cos(\alpha)=\frac{c}{b}$ - \item $\tan(\alpha)=\frac{a}{c}$ - \end{itemize} -\end{minipage} diff --git a/graph/2sat.cpp b/graph/2sat.cpp deleted file mode 100644 index 75e54e6..0000000 --- a/graph/2sat.cpp +++ /dev/null @@ -1,31 +0,0 @@ -struct sat2 { - int n; // + scc variablen - vector sol; - - sat2(int vars) : n(vars*2), adj(n) {} - - static int var(int i) {return i << 1;} // use this! 
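  // Editor's sketch (not in the original): the literal for x_i is var(i) and
  // its negation is var(i)^1, e.g. for the clause (x_0 OR NOT x_1):
  //   sat2 sat(2);
  //   sat.addOr(sat2::var(0), sat2::var(1) ^ 1);
  //   bool ok = sat.solve();  // sat.sol[sat2::var(i)] is the value of x_i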
- - void addImpl(int a, int b) { - adj[a].push_back(b); - adj[1^b].push_back(1^a); - } - void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);} - void addOr(int a, int b) {addImpl(1^a, b);} - void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);} - void addTrue(int a) {addImpl(1^a, a);} - void addFalse(int a) {addTrue(1^a);} - void addAnd(int a, int b) {addTrue(a); addTrue(b);} - void addNand(int a, int b) {addOr(1^a, 1^b);} - - bool solve() { - scc(); //scc code von oben - sol.assign(n, -1); - for (int i = 0; i < n; i += 2) { - if (idx[i] == idx[i + 1]) return false; - sol[i] = idx[i] < idx[i + 1]; - sol[i + 1] = !sol[i]; - } - return true; - } -}; diff --git a/graph/LCA.cpp b/graph/LCA.cpp deleted file mode 100644 index 7debf8f..0000000 --- a/graph/LCA.cpp +++ /dev/null @@ -1,24 +0,0 @@ -vector> adj(); -vector visited(); -vector first(); -vector depth(); - -void initLCA(int gi, int d, int& c) { - visited[c] = gi, depth[c] = d, first[gi] = min(c, first[gi]), c++; - for(int gn : adj[gi]) { - initLCA(gn, d+1, c); - visited[c] = gi, depth[c] = d, c++; -}} - -int getLCA(int a, int b) { - return visited[query(min(first[a], first[b]), max(first[a], first[b]))]; -} - -void exampleUse() { - int c = 0; - visited = vector(2*sz(adj)); - first = vector(sz(adj), 2*sz(adj)); - depth = vector(2*sz(adj)); - initLCA(0, 0, c); - init(depth); -} diff --git a/graph/LCA_sparse.cpp b/graph/LCA_sparse.cpp deleted file mode 100644 index 649e697..0000000 --- a/graph/LCA_sparse.cpp +++ /dev/null @@ -1,32 +0,0 @@ -struct LCA { - vector depth; - vector visited, first; - int idx; - SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@ - - void init(vector>& adj, int root) { - depth.assign(2 * sz(adj), 0); - visited.assign(2 * sz(adj), -1); - first.assign(sz(adj), 2 * sz(adj)); - idx = 0; - dfs(adj, root); - st.init(&depth); - } - - void dfs(vector>& adj, int v, ll d=0, int p=-1) { - visited[idx] = v, depth[idx] = d; - first[v] = min(idx, first[v]), idx++; - - for (int u : adj[v]) { - if (first[u] == 2 * sz(adj)) { - dfs(adj, u, d + 1, v); - visited[idx] = v, depth[idx] = d, idx++; - }}} - - int getLCA(int u, int v) { - if (first[u] > first[v]) swap(u, v); - return visited[st.queryIdempotent(first[u], first[v] + 1)]; - } - - ll getDepth(int v) {return depth[first[v]];} -}; diff --git a/graph/TSP.cpp b/graph/TSP.cpp deleted file mode 100644 index cfb1b4d..0000000 --- a/graph/TSP.cpp +++ /dev/null @@ -1,28 +0,0 @@ -vector> dist; // Entfernung zwischen je zwei Punkten. - -void TSP() { - int n = sz(dist), m = 1 << n; - vector> dp(n, vector(m, edge{INF, -1})); - - for (int c = 0; c < n; c++) - dp[c][m-1].dist = dist[c][0], dp[c][m-1].to = 0; - - for (int v = m - 2; v >= 0; v--) { - for (int c = n - 1; c >= 0; c--) { - for (int g = 0; g < n; g++) { - if (g != c && !((1 << g) & v)) { - if ((dp[g][(v | (1 << g))].dist + dist[c][g]) < - dp[c][v].dist) { - dp[c][v].dist = - dp[g][(v | (1 << g))].dist + dist[c][g]; - dp[c][v].to = g; - }}}}} - // return dp[0][1]; // Länge der Tour - - vector tour; tour.push_back(0); int v = 0; - while (tour.back() != 0 || sz(tour) == 1) - tour.push_back(dp[tour.back()] - [(v |= (1 << tour.back()))].to); - // Enthält Knoten 0 zweimal. An erster und letzter Position. 
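  // editor's note: O(2^n * n^2) time, O(2^n * n) memory; dist and the edge
  // struct (fields dist, to as used above) must be set up before calling TSP().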
- // return tour; -} diff --git a/graph/articulationPoints.cpp b/graph/articulationPoints.cpp deleted file mode 100644 index 6819bf3..0000000 --- a/graph/articulationPoints.cpp +++ /dev/null @@ -1,45 +0,0 @@ -vector> adj; -vector num; -int counter, rootCount, root; -vector isArt; -vector bridges, st; -vector> bcc; - -int dfs(int v, int from = -1) { - int me = num[v] = ++counter, top = me; - for (Edge& e : adj[v]) { - if (e.id == from){} - else if (num[e.to]) { - top = min(top, num[e.to]); - if (num[e.to] < me) st.push_back(e); - } else { - if (v == root) rootCount++; - int si = sz(st); - int up = dfs(e.to, e.id); - top = min(top, up); - if (up >= me) isArt[v] = true; - if (up > me) bridges.push_back(e); - if (up <= me) st.push_back(e); - if (up == me) { - bcc.emplace_back(); - while (sz(st) > si) { - bcc.back().push_back(st.back()); - st.pop_back(); - }}}} - return top; -} - -void find() { - counter = 0; - num.assign(sz(adj), 0); - isArt.assign(sz(adj), false); - bridges.clear(); - st.clear(); - bcc.clear(); - for (int v = 0; v < sz(adj); v++) { - if (!num[v]) { - root = v; - rootCount = 0; - dfs(v); - isArt[v] = rootCount > 1; -}}} diff --git a/graph/bellmannFord.cpp b/graph/bellmannFord.cpp deleted file mode 100644 index 4324886..0000000 --- a/graph/bellmannFord.cpp +++ /dev/null @@ -1,17 +0,0 @@ -void bellmannFord(int n, vector edges, int start) { - vector dist(n, INF), parent(n, -1); - dist[start] = 0; - - for (int i = 1; i < n; i++) { - for (edge& e : edges) { - if (dist[e.from] != INF && - dist[e.from] + e.cost < dist[e.to]) { - dist[e.to] = dist[e.from] + e.cost; - parent[e.to] = e.from; - }}} - - for (edge& e : edges) { - if (dist[e.from] != INF && - dist[e.from] + e.cost < dist[e.to]) { - // Negativer Kreis gefunden. -}}} //return dist, parent?; diff --git a/graph/bitonicTSP.cpp b/graph/bitonicTSP.cpp deleted file mode 100644 index e8fc2cb..0000000 --- a/graph/bitonicTSP.cpp +++ /dev/null @@ -1,31 +0,0 @@ -vector> dist; // Initialisiere mit Entfernungen zwischen Punkten. - -void bitonicTSP() { - vector dp(sz(dist), HUGE_VAL); - vector pre(sz(dist)); // nur für Tour - dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0; - for (unsigned int i = 2; i < sz(dist); i++) { - double link = 0; - for (int j = i - 2; j >= 0; j--) { - link += dist[j + 1][j + 2]; - double opt = link + dist[j][i] + dp[j + 1] - dist[j][j + 1]; - if (opt < dp[i]) { - dp[i] = opt; - pre[i] = j; - }}} - // return dp.back(); // Länger der Tour - - int j, n = sz(dist) - 1; - vector ut, lt = {n, n - 1}; - do { - j = pre[n]; - (lt.back() == n ? lt : ut).push_back(j); - for (int i = n - 1; i > j + 1; i--) { - (lt.back() == i ? lt : ut).push_back(i - 1); - } - } while(n = j + 1, j > 0); - (lt.back() == 1 ? lt : ut).push_back(0); - reverse(all(lt)); - lt.insert(lt.end(), all(ut)); - //return lt;// Enthält Knoten 0 zweimal. An erster und letzter Position. -} diff --git a/graph/bitonicTSPsimple.cpp b/graph/bitonicTSPsimple.cpp deleted file mode 100644 index 96ae5bd..0000000 --- a/graph/bitonicTSPsimple.cpp +++ /dev/null @@ -1,28 +0,0 @@ -vector> dist; // Entfernungen zwischen Punkten. 
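// Editor's sketch (not in the original): dist is assumed to come from points
// already sorted by x, e.g. with pt = complex<double>:
//   sort(all(pts), [](pt a, pt b){ return real(a) < real(b); });
//   dist.assign(sz(pts), vector<double>(sz(pts)));
//   for (int i = 0; i < sz(pts); i++)
//     for (int j = 0; j < sz(pts); j++) dist[i][j] = abs(pts[i] - pts[j]);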
-vector> dp; - -double get(int p1, int p2) { - int v = max(p1, p2) + 1; - if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1]; - if (dp[p1][p2] >= 0.0) return dp[p1][p2]; - double tryLR = dist[p1][v] + get(v, p2); - double tryRL = dist[p2][v] + get(p1, v); - return dp[p1][p2] = min(tryLR, tryRL); -} - -void bitonicTour() { - dp = vector>(sz(dist), - vector(sz(dist), -1)); - get(0, 0); - // return dp[0][0]; // Länger der Tour - vector lr = {0}, rl = {0}; - for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) { - if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) { - lr.push_back(v); p1 = v; - } else { - rl.push_back(v); p2 = v; - }} - lr.insert(lr.end(), rl.rbegin(), rl.rend()); - // Enthält Knoten 0 zweimal. An erster und letzter Position. - // return lr; -} diff --git a/graph/blossom.cpp b/graph/blossom.cpp deleted file mode 100644 index 7bd494a..0000000 --- a/graph/blossom.cpp +++ /dev/null @@ -1,82 +0,0 @@ -struct GM { - vector> adj; - // pairs ist der gematchte knoten oder n - vector pairs, first, que; - vector> label; - int head, tail; - - GM(int n) : adj(n), pairs(n + 1, n), first(n + 1, n), - que(n), label(n + 1, {-1, -1}) {} - - void rematch(int u, int v) { - int t = pairs[u]; pairs[u] = v; - if (pairs[t] != u) return; - if (label[u].second == -1) { - pairs[t] = label[u].first; - rematch(pairs[t], t); - } else { - auto [x, y] = label[u]; - rematch(x, y); - rematch(y, x); - }} - - int findFirst(int v) { - return label[first[v]].first < 0 ? first[v] - : first[v] = findFirst(first[v]); - } - - void relabel(int x, int y) { - int r = findFirst(x); - int s = findFirst(y); - if (r == s) return; - auto h = label[r] = label[s] = {~x, y}; - int join; - while (true) { - if (s != sz(adj)) swap(r, s); - r = findFirst(label[pairs[r]].first); - if (label[r] == h) { - join = r; - break; - } else { - label[r] = h; - }} - for (int v : {first[x], first[y]}) { - for (; v != join; v = first[label[pairs[v]].first]) { - label[v] = {x, y}; - first[v] = join; - que[tail++] = v; - }}} - - bool augment(int v) { - label[v] = {sz(adj), -1}; - first[v] = sz(adj); - head = tail = 0; - for (que[tail++] = v; head < tail;) { - int x = que[head++]; - for (int y : adj[x]) { - if (pairs[y] == sz(adj) && y != v) { - pairs[y] = x; - rematch(x, y); - return true; - } else if (label[y].first >= 0) { - relabel(x, y); - } else if (label[pairs[y]].first == -1) { - label[pairs[y]].first = x; - first[pairs[y]] = y; - que[tail++] = pairs[y]; - }}} - return false; - } - - int match() { - int matching = head = tail = 0; - for (int v = 0; v < sz(adj); v++) { - if (pairs[v] < sz(adj) || !augment(v)) continue; - matching++; - for (int i = 0; i < tail; i++) - label[que[i]] = label[pairs[que[i]]] = {-1, -1}; - label[sz(adj)] = {-1, -1}; - } - return matching; - } -}; diff --git a/graph/bronKerbosch.cpp b/graph/bronKerbosch.cpp deleted file mode 100644 index ceeb668..0000000 --- a/graph/bronKerbosch.cpp +++ /dev/null @@ -1,24 +0,0 @@ -using bits = bitset<64>; -vector adj, cliques; - -void addEdge(int a, int b) { - if (a != b) adj[a][b] = adj[b][a] = 1; -} - -void bronKerboschRec(bits R, bits P, bits X) { - if (!P.any() && !X.any()) { - cliques.push_back(R); - } else { - int q = min(P._Find_first(), X._Find_first()); - bits cands = P & ~adj[q]; - for (int i = 0; i < sz(adj); i++) if (cands[i]){ - R[i] = 1; - bronKerboschRec(P & adj[i], X & adj[i], R); - R[i] = P[i] = 0; - X[i] = 1; -}}} - -void bronKerbosch() { - cliques.clear(); - bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {}); -} diff --git a/graph/capacityScaling.cpp 
b/graph/capacityScaling.cpp deleted file mode 100644 index 90ae654..0000000 --- a/graph/capacityScaling.cpp +++ /dev/null @@ -1,44 +0,0 @@ -struct edge { - int from, to; - ll f, c; -}; - -vector edges; -vector> adj; -int s, t, dfsCounter; -vector visited; -ll capacity; - -void addEdge(int from, int to, ll c) { - adj[from].push_back(sz(edges)); - edges.push_back({from, to, 0, c}); - adj[to].push_back(sz(edges)); - edges.push_back({to, from, 0, 0}); -} - -bool dfs(int v) { - if (v == t) return true; - if (visited[v] == dfsCounter) return false; - visited[v] = dfsCounter; - for (int id : adj[v]) { - if (edges[id].c >= capacity && dfs(edges[id].to)) { - edges[id].c -= capacity; edges[id ^ 1].c += capacity; - edges[id].f += capacity; edges[id ^ 1].f -= capacity; - return true; - }} - return false; -} - -ll maxFlow(int source, int target) { - capacity = 1ll << 62; - s = source; - t = target; - ll flow = 0; - visited.assign(sz(adj), 0); - dfsCounter = 0; - while (capacity) { - while (dfsCounter++, dfs(s)) flow += capacity; - capacity /= 2; - } - return flow; -} diff --git a/graph/centroid.cpp b/graph/centroid.cpp deleted file mode 100644 index 2494464..0000000 --- a/graph/centroid.cpp +++ /dev/null @@ -1,21 +0,0 @@ -vector s; -void dfs_sz(int v, int from = -1) { - s[v] = 1; - for (int u : adj[v]) if (u != from) { - dfs_sz(u, v); - s[v] += s[u]; -}} - -pair dfs_cent(int v, int from, int n) { - for (int u : adj[v]) if (u != from) { - if (2 * s[u] == n) return {v, u}; - if (2 * s[u] > n) return dfs_cent(u, v, n); - } - return {v, -1}; -} - -pair find_centroid(int root) { - s.resize(sz(adj)); - dfs_sz(root); - return dfs_cent(root, -1, s[root]); -} diff --git a/graph/connect.cpp b/graph/connect.cpp deleted file mode 100644 index 98b5b25..0000000 --- a/graph/connect.cpp +++ /dev/null @@ -1,31 +0,0 @@ -struct connect { - int n; - vector> edges; - LCT lct; // min LCT no updates required - - connect(int n, int m) : n(n), edges(m), lct(n+m) {} - - bool connected(int u, int v) { - return lct.connected(&lct.nodes[u], &lct.nodes[v]); - } - - void addEdge(int u, int v, int id) { - lct.nodes[id + n] = LCT::Node(id + n, id + n); - edges[id] = {u, v}; - if (connected(u, v)) { - int old = lct.query(&lct.nodes[u], &lct.nodes[v]); - if (old < id) eraseEdge(old); - } - if (!connected(u, v)) { - lct.link(&lct.nodes[u], &lct.nodes[id + n]); - lct.link(&lct.nodes[v], &lct.nodes[id + n]); - }} - - void eraseEdge(ll id) { - if (connected(edges[id].first, edges[id].second) && - lct.query(&lct.nodes[edges[id].first], - &lct.nodes[edges[id].second]) == id) { - lct.cut(&lct.nodes[edges[id].first], &lct.nodes[id + n]); - lct.cut(&lct.nodes[edges[id].second], &lct.nodes[id + n]); - }} -}; diff --git a/graph/cycleCounting.cpp b/graph/cycleCounting.cpp deleted file mode 100644 index bd7a219..0000000 --- a/graph/cycleCounting.cpp +++ /dev/null @@ -1,64 +0,0 @@ -constexpr int maxEdges = 128; -using cycle = bitset; -struct cylces { - vector>> adj; - vector seen; - vector paths, base; - vector> edges; - - cylces(int n) : adj(n), seen(n), paths(n) {} - - void addEdge(int u, int v) { - adj[u].push_back({v, sz(edges)}); - adj[v].push_back({u, sz(edges)}); - edges.push_back({u, v}); - } - - void addBase(cycle cur) { - for (cycle o : base) { - o ^= cur; - if (o._Find_first() > cur._Find_first()) cur = o; - } - if (cur.any()) base.push_back(cur); - } - - void findBase(int v = 0, int from = -1, cycle cur = {}) { - if (adj.empty()) return; - if (seen[v]) { - addBase(cur ^ paths[v]); - } else { - seen[v] = true; - paths[v] = cur; - for (auto 
[u, id] : adj[v]) { - if (u == from) continue; - cur[id].flip(); - findBase(u, v, cur); - cur[id].flip(); - }}} - - //cycle must be constrcuted from base - bool isCycle(cycle cur) { - if (cur.none()) return false; - init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@ - for (int i = 0; i < sz(edges); i++) { - if (cur[i]) { - cur[i] = false; - if (findSet(edges[i].first) == - findSet(edges[i].second)) break; - unionSets(edges[i].first, edges[i].second); - }} - return cur.none(); - }; - - int count() { - findBase(); - int res = 0; - for (int i = 1; i < (1 << sz(base)); i++) { - cycle cur; - for (int j = 0; j < sz(base); j++) { - if (((i >> j) & 1) != 0) cur ^= base[j]; - if (isCycle(cur)) res++; - } - return res; - } -}; diff --git a/graph/dfs.tex b/graph/dfs.tex deleted file mode 100644 index 1e6705f..0000000 --- a/graph/dfs.tex +++ /dev/null @@ -1,16 +0,0 @@ -\begin{expandtable} -\begin{tabularx}{\linewidth}{|X|XIXIX|} - \hline - Kantentyp $(v, w)$ & \code{dfs[v] < dfs[w]} & \code{fin[v] > fin[w]} & \code{seen[w]} \\ - %$(v, w)$ & \code{dfs[w]} & \code{fin[w]} & \\ - \hline - in-tree & \code{true} & \code{true} & \code{false} \\ - \grayhline - forward & \code{true} & \code{true} & \code{true} \\ - \grayhline - backward & \code{false} & \code{false} & \code{true} \\ - \grayhline - cross & \code{false} & \code{true} & \code{true} \\ - \hline -\end{tabularx} -\end{expandtable} diff --git a/graph/dijkstra.cpp b/graph/dijkstra.cpp deleted file mode 100644 index 57071b0..0000000 --- a/graph/dijkstra.cpp +++ /dev/null @@ -1,21 +0,0 @@ -using path = pair; //dist, destination - -void dijkstra(const vector>& adj, int start) { - priority_queue, greater> pq; - vector dist(sz(adj), INF); - vector prev(sz(adj), -1); - dist[start] = 0; pq.emplace(0, start); - - while (!pq.empty()) { - auto [dv, v] = pq.top(); pq.pop(); - if (dv > dist[v]) continue; // WICHTIG! 
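    // editor's comment: lazy deletion, stale queue entries are skipped here
    // instead of using a decrease-key operation.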
- - for (auto [du, u] : adj[v]) { - ll newDist = dv + du; - if (newDist < dist[u]) { - dist[u] = newDist; - prev[u] = v; - pq.emplace(dist[u], u); - }}} - //return dist, prev; -} diff --git a/graph/dinicScaling.cpp b/graph/dinicScaling.cpp deleted file mode 100644 index f4e833a..0000000 --- a/graph/dinicScaling.cpp +++ /dev/null @@ -1,51 +0,0 @@ -struct Edge { - int to, rev; - ll f, c; -}; - -vector> adj; -int s, t; -vector pt, dist; - -void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0}); -} - -bool bfs(ll lim) { - dist.assign(sz(adj), -1); - dist[s] = 0; - queue q({s}); - while (!q.empty() && dist[t] < 0) { - int v = q.front(); q.pop(); - for (Edge& e : adj[v]) { - if (dist[e.to] < 0 && e.c - e.f >= lim) { - dist[e.to] = dist[v] + 1; - q.push(e.to); - }}} - return dist[t] >= 0; -} - -bool dfs(int v, ll flow) { - if (v == t) return true; - for (; pt[v] < sz(adj[v]); pt[v]++) { - Edge& e = adj[v][pt[v]]; - if (dist[e.to] != dist[v] + 1) continue; - if (e.c - e.f >= flow && dfs(e.to, flow)) { - e.f += flow; - adj[e.to][e.rev].f -= flow; - return true; - }} - return false; -} - -ll maxFlow(int source, int target) { - s = source, t = target; - ll flow = 0; - for (ll lim = (1LL << 62); lim >= 1; lim /= 2) { - while (bfs(lim)) { - pt.assign(sz(adj), 0); - while (dfs(s, lim)) flow += lim; - }} - return flow; -} diff --git a/graph/euler.cpp b/graph/euler.cpp deleted file mode 100644 index a5ea192..0000000 --- a/graph/euler.cpp +++ /dev/null @@ -1,23 +0,0 @@ -vector> idx; -vector to, validIdx, cycle; -vector used; - -void addEdge(int u, int v) { - idx[u].push_back(sz(to)); - to.push_back(v); - used.push_back(false); - idx[v].push_back(sz(to)); // für ungerichtet - to.push_back(u); - used.push_back(false); -} - -void euler(int v) { // init idx und validIdx - for (;validIdx[v] < sz(idx[v]); validIdx[v]++) { - if (!used[idx[v][validIdx[v]]]) { - int u = to[idx[v][validIdx[v]]]; - used[idx[v][validIdx[v]]] = true; - used[idx[v][validIdx[v]] ^ 1] = true; // für ungerichtet - euler(u); - }} - cycle.push_back(v); // Zyklus in umgekehrter Reihenfolge. -} diff --git a/graph/floydWarshall.cpp b/graph/floydWarshall.cpp deleted file mode 100644 index fb6263e..0000000 --- a/graph/floydWarshall.cpp +++ /dev/null @@ -1,26 +0,0 @@ -vector> dist; // Entfernung zwischen je zwei Punkten. 
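// editor's note: initialize dist[i][i] = 0 and dist[i][j] = weight of edge
// i->j (INF if there is none) before calling floydWarshall(), cf. graph.tex.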
-vector> pre; - -void floydWarshall() { - pre.assign(sz(dist), vector(sz(dist), -1)); - for (int i = 0; i < sz(dist); i++) { - for (int j = 0; j < sz(dist); j++) { - if (dist[i][j] < INF) { - pre[i][j] = j; - }}} - - for (int k = 0; k < sz(dist); k++) { - for (int i = 0; i < sz(dist); i++) { - for (int j = 0; j < sz(dist); j++) { - if (dist[i][j] > dist[i][k] + dist[k][j]) { - dist[i][j] = dist[i][k] + dist[k][j]; - pre[i][j] = pre[i][k]; -}}}}} - -vector getPath(int u, int v) { - //return dist[u][v]; // Pfadlänge u -> v - if (pre[u][v] < 0) return {}; - vector path = {v}; - while (u != v) path.push_back(u = pre[u][v]); - return path; //Pfad u -> v -} diff --git a/graph/graph.tex b/graph/graph.tex deleted file mode 100644 index 9232090..0000000 --- a/graph/graph.tex +++ /dev/null @@ -1,276 +0,0 @@ -\section{Graphen} - -\begin{algorithm}{Kruskal} - \begin{methods}[ll] - berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\ - \end{methods} - \sourcecode{graph/kruskal.cpp} -\end{algorithm} - -\begin{algorithm}{Minimale Spannbäume} - \paragraph{Schnitteigenschaft} - Für jeden Schnitt $C$ im Graphen gilt: - Gibt es eine Kante $e$, die echt leichter ist als alle anderen Schnittkanten, so gehört diese zu allen minimalen Spannbäumen. - ($\Rightarrow$ Die leichteste Kante in einem Schnitt kann in einem minimalen Spannbaum verwendet werden.) - - \paragraph{Kreiseigenschaft} - Für jeden Kreis $K$ im Graphen gilt: - Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums. -\end{algorithm} - -\begin{algorithm}{Heavy-Light Decomposition} - \begin{methods} - \method{get\_intervals}{gibt Zerlegung des Pfades von $u$ nach $v$}{\log(\abs{V})} - \end{methods} - \textbf{Wichtig:} Intervalle sind halboffen - - Subbaum unter dem Knoten $v$ ist das Intervall $[\text{\code{in[v]}},~\text{\code{out[v]}})$. - \sourcecode{graph/hld.cpp} -\end{algorithm} - -\begin{algorithm}{Lowest Common Ancestor} - \begin{methods} - \method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})} - \method{getLCA}{findet LCA}{1} - \method{getDepth}{berechnet Distanz zur Wurzel im DFS-Baum}{1} - \end{methods} - \sourcecode{graph/LCA_sparse.cpp} -\end{algorithm} - -\begin{algorithm}{Centroids} - \begin{methods} - \method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}} - \end{methods} - \sourcecode{graph/centroid.cpp} -\end{algorithm} - -\begin{algorithm}{Eulertouren} - \begin{methods} - \method{euler}{berechnet den Kreis}{\abs{V}+\abs{E}} - \end{methods} - \sourcecode{graph/euler.cpp} - \begin{itemize} - \item Zyklus existiert, wenn jeder Knoten geraden Grad hat (ungerichtet),\\ bei jedem Knoten Ein- und Ausgangsgrad übereinstimmen (gerichtet). - \item Pfad existiert, wenn genau $\{0, 2\}$ Knoten ungeraden Grad haben (ungerichtet),\\ bei allen Knoten Ein- und Ausgangsgrad übereinstimmen oder einer eine Ausgangskante mehr hat (Startknoten) und einer eine Eingangskante mehr hat (Endknoten). - \item \textbf{Je nach Aufgabenstellung überprüfen, wie ein unzusammenhängender Graph interpretiert werden sollen.} - \item Wenn eine bestimmte Sortierung verlangt wird oder Laufzeit vernachlässigbar ist, ist eine Implementierung mit einem \code{vector> adj} leichter - \item \textbf{Wichtig:} Algorithmus schlägt nicht fehl, falls kein Eulerzyklus existiert. - Die Existenz muss separat geprüft werden. 
- \end{itemize} -\end{algorithm} - -\begin{algorithm}{Baum-Isomorphie} - \begin{methods} - \method{treeLabel}{berechnet kanonischen Namen für einen Baum}{\abs{V}\*\log(\abs{V})} - \end{methods} - \sourcecode{graph/treeIsomorphism.cpp} -\end{algorithm} - -\subsection{Kürzeste Wege} - -\subsubsection{\textsc{Bellmann-Ford}-Algorithmus} -\method{bellmanFord}{kürzeste Pfade oder negative Kreise finden}{\abs{V}\*\abs{E}} -\sourcecode{graph/bellmannFord.cpp} - -\subsubsection{Algorithmus von \textsc{Dijkstra}} -\method{dijkstra}{kürzeste Pfade in Graphen ohne negative Kanten}{\abs{E}\*\log(\abs{V})} -\sourcecode{graph/dijkstra.cpp} - -\subsubsection{\textsc{Floyd-Warshall}-Algorithmus} -\method{floydWarshall}{kürzeste Pfade oder negative Kreise finden}{\abs{V}^3} -\begin{itemize} - \item \code{dist[i][i] = 0, dist[i][j] = edge\{j, j\}.weight} oder \code{INF} - \item \code{i} liegt auf einem negativen Kreis $\Leftrightarrow$ \code{dist[i][i] < 0}. -\end{itemize} -\sourcecode{graph/floydWarshall.cpp} - -\subsubsection{Matrix-Algorithmus} -Sei $d_{i\smash{j}}$ die Distanzmatrix von $G$, dann gibt $d_{i\smash{j}}^k$ die kürzeste Distanz von $i$ nach $j$ mit maximal $k$ kanten an mit der Verknüpfung: $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \min\{a_{ik} \cdot b_{k\smash{j}}\}$ - - -Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, dann gibt $a_{i\smash{j}}^k$ die Anzahl der Wege von $i$ nach $j$ mit Länge genau \textcolor{gray}{(maximal)} $k$ an mit der Verknüpfung: $c_{i\smash{j}} = a_{i\smash{j}} \otimes b_{i\smash{j}} = \sum a_{ik} \cdot b_{k\smash{j}}$ - -\begin{algorithm}{Dynamic Connectivity} - \begin{methods} - \method{Constructor}{erzeugt Baum ($n$ Knoten, $m$ updates)}{n+m} - \method{addEdge}{fügt Kannte ein,\code{id}=delete Zeitpunkt}{\log(n)} - \method{eraseEdge}{entfernt Kante \code{id}}{\log(n)} - \end{methods} - \sourcecode{graph/connect.cpp} -\end{algorithm} - -\begin{algorithm}{Erd\H{o}s-Gallai} - Sei $d_1 \geq \cdots \geq d_{n}$. Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$ - \begin{methods} - \method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})} - \end{methods} - \sourcecode{graph/havelHakimi.cpp} -\end{algorithm} - -\begin{algorithm}{Strongly Connected Components (\textsc{Tarjan})} - \begin{methods} - \method{scc}{berechnet starke Zusammenhangskomponenten}{\abs{V}+\abs{E}} - \end{methods} - \sourcecode{graph/scc.cpp} -\end{algorithm} - -\begin{algorithm}{DFS} - \input{graph/dfs} -\end{algorithm} - -\begin{algorithm}{Artikulationspunkte, Brücken und BCC} - \begin{methods} - \method{find}{berechnet Artikulationspunkte, Brücken und BCC}{\abs{V}+\abs{E}} - \end{methods} - \textbf{Wichtig:} isolierte Knoten und Brücken sind keine BCC. 
- \sourcecode{graph/articulationPoints.cpp} -\end{algorithm} - -\begin{algorithm}{2-SAT} - \sourcecode{graph/2sat.cpp} -\end{algorithm} - -\begin{algorithm}{Maximal Cliques} - \begin{methods} - \method{bronKerbosch}{berechnet alle maximalen Cliquen}{3^\frac{n}{3}} - \method{addEdge}{fügt \textbf{ungerichtete} Kante ein}{1} - \end{methods} - \sourcecode{graph/bronKerbosch.cpp} -\end{algorithm} - -\begin{algorithm}{Cycle Counting} - \begin{methods} - \method{findBase}{berechnet Basis}{\abs{V}\cdot\abs{E}} - \method{count}{zählt Zykel}{2^{\abs{\mathit{base}}}} - \end{methods} - \begin{itemize} - \item jeder Zyklus ist das xor von einträgen in \code{base}. - \end{itemize} - \sourcecode{graph/cycleCounting.cpp} -\end{algorithm} - -\begin{algorithm}{Wert des maximalen Matchings} - Fehlerwahrscheinlichkeit: $\left(\frac{m}{MOD}\right)^I$ - \sourcecode{graph/matching.cpp} -\end{algorithm} - -\begin{algorithm}{Allgemeines maximales Matching} - \begin{methods} - \method{match}{berechnet algemeines Matching}{\abs{E}\*\abs{V}\*\log(\abs{V})} - \end{methods} - \sourcecode{graph/blossom.cpp} -\end{algorithm} - -\begin{algorithm}{Rerooting Template} - \sourcecode{graph/reroot.cpp} -\end{algorithm} - -\begin{algorithm}{Virtual Trees} - \sourcecode{graph/virtualTree.cpp} -\end{algorithm} - -\begin{algorithm}{Maximal Cardinatlity Bipartite Matching} - \label{kuhn} - \begin{methods} - \method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})} - \end{methods} - \begin{itemize} - \item die ersten [0..l) Knoten in \code{adj} sind die linke Seite des Graphen - \end{itemize} - \sourcecode{graph/maxCarBiMatch.cpp} - \begin{methods} - \method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}} - \end{methods} - \sourcecode{graph/hopcroftKarp.cpp} -\end{algorithm} - -\begin{algorithm}{Global Mincut} - \begin{methods} - \method{stoer\_wagner}{berechnet globalen Mincut}{\abs{V}\abs{E}+\abs{V}^2\*\log(\abs{E})} - \method{merge(a,b)}{merged Knoten $b$ in Knoten $a$}{\abs{E}} - \end{methods} - \textbf{Tipp:} Cut Rekonstruktion mit \code{unionFind} für Partitionierung oder \code{vector} für edge id's im cut. - \sourcecode{graph/stoerWagner.cpp} -\end{algorithm} - -\subsection{Max-Flow} -\optional{ -\subsubsection{Capacity Scaling} -\begin{methods} - \method{maxFlow}{gut bei dünn besetzten Graphen.}{\abs{E}^2\*\log(C)} - \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} -\end{methods} -\sourcecode{graph/capacityScaling.cpp} -} - -\optional{ -\subsubsection{Push Relabel} -\begin{methods} - \method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}} - \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} -\end{methods} -\sourcecode{graph/pushRelabel.cpp} -} - -\begin{algorithm}{Min-Cost-Max-Flow} - \begin{methods} - \method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2} - \end{methods} - \sourcecode{graph/minCostMaxFlow.cpp} -\end{algorithm} - -\subsubsection{Dinic's Algorithm mit Capacity Scaling} -\begin{methods} - \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}} - \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1} -\end{methods} -\sourcecode{graph/dinicScaling.cpp} -\vfill\null -\columnbreak - -\optional{ -\subsubsection{Anwendungen} -\begin{itemize} - \item \textbf{Maximum Edge Disjoint Paths}\newline - Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen. - \begin{enumerate} - \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante auf 1. 
- \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Kanten. - \end{enumerate} - \item \textbf{Maximum Independent Paths}\newline - Finde die maximale Anzahl an Pfaden von $s$ nach $t$, die keinen Knoten teilen. - \begin{enumerate} - \item Setze $s$ als Quelle, $t$ als Senke und die Kapazität jeder Kante \emph{und jedes Knotens} auf 1. - \item Der maximale Fluss entspricht den unterschiedlichen Pfaden ohne gemeinsame Knoten. - \end{enumerate} - \item \textbf{Min-Cut}\newline - Der maximale Fluss ist gleich dem minimalen Schnitt. - Bei Quelle $s$ und Senke $t$, partitioniere in $S$ und $T$. - Zu $S$ gehören alle Knoten, die im Residualgraphen von $s$ aus erreichbar sind (Rückwärtskanten beachten). -\end{itemize} -} - -\begin{algorithm}{Maximum Weight Bipartite Matching} - \begin{methods} - \method{match}{berechnet Matching}{\abs{V}^3} - \end{methods} - \sourcecode{graph/maxWeightBipartiteMatching.cpp} -\end{algorithm} -\vfill\null -\columnbreak - - -\begin{algorithm}[optional]{TSP} - \begin{methods} - \method{TSP}{berechnet eine Tour}{n^2\*2^n} - \end{methods} - \sourcecode{graph/TSP.cpp} -\end{algorithm} - -\begin{algorithm}[optional]{Bitonic TSP} - \begin{methods} - \method{bitonicTSP}{berechnet eine Bitonische Tour}{n^2} - \end{methods} - \sourcecode{graph/bitonicTSPsimple.cpp} -\end{algorithm} - diff --git a/graph/havelHakimi.cpp b/graph/havelHakimi.cpp deleted file mode 100644 index cbd6991..0000000 --- a/graph/havelHakimi.cpp +++ /dev/null @@ -1,18 +0,0 @@ -vector> havelHakimi(const vector& deg) { - priority_queue> pq; - for (int i = 0; i < sz(deg); i++) { - if (deg[i] > 0) pq.push({deg[i], i}); - } - vector> adj; - while (!pq.empty()) { - auto [degV, v] = pq.top(); pq.pop(); - if (sz(pq) < degV) return {}; //impossible - vector> todo(degV); - for (auto& e : todo) e = pq.top(), pq.pop(); - for (auto [degU, u] : todo) { - adj[v].push_back(u); - adj[u].push_back(v); - if (degU > 1) pq.push({degU - 1, u}); - }} - return adj; -} diff --git a/graph/hld.cpp b/graph/hld.cpp deleted file mode 100644 index 65d3f5c..0000000 --- a/graph/hld.cpp +++ /dev/null @@ -1,44 +0,0 @@ -vector> adj; -vector sz, in, out, nxt, par; -int counter; - -void dfs_sz(int v = 0, int from = -1) { - for (auto& u : adj[v]) if (u != from) { - dfs_sz(u, v); - sz[v] += sz[u]; - if (adj[v][0] == from || sz[u] > sz[adj[v][0]]) { - swap(u, adj[v][0]); //changes adj! -}}} - -void dfs_hld(int v = 0, int from = -1) { - par[v] = from; - in[v] = counter++; - for (int u : adj[v]) if (u != from) { - nxt[u] = (u == adj[v][0]) ? 
nxt[v] : u; - dfs_hld(u, v); - } - out[v] = counter; -} - -void init(int root = 0) { - int n = sz(adj); - sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1); - in.resize(n), out.resize(n); - counter = 0; - dfs_sz(root); - dfs_hld(root); -} - -template -void for_intervals(int u, int v, F&& f) { - for (;; v = par[nxt[v]]) { - if (in[v] < in[u]) swap(u, v); - f(max(in[u], in[nxt[v]]), in[v] + 1); - if (in[nxt[v]] <= in[u]) return; -}} - -int get_lca(int u, int v) { - for (;; v = par[nxt[v]]) { - if (in[v] < in[u]) swap(u, v); - if (in[nxt[v]] <= in[u]) return u; -}} diff --git a/graph/hopcroftKarp.cpp b/graph/hopcroftKarp.cpp deleted file mode 100644 index c1f5d1c..0000000 --- a/graph/hopcroftKarp.cpp +++ /dev/null @@ -1,47 +0,0 @@ -vector> adj; -// pairs ist der gematchte Knoten oder -1 -vector pairs, dist, ptr; - -bool bfs(int l) { - queue q; - for(int v = 0; v < l; v++) { - if (pairs[v] < 0) {dist[v] = 0; q.push(v);} - else dist[v] = -1; - } - bool exist = false; - while(!q.empty()) { - int v = q.front(); q.pop(); - for (int u : adj[v]) { - if (pairs[u] < 0) {exist = true; continue;} - if (dist[pairs[u]] < 0) { - dist[pairs[u]] = dist[v] + 1; - q.push(pairs[u]); - }}} - return exist; -} - -bool dfs(int v) { - for (; ptr[v] < sz(adj[v]); ptr[v]++) { - int u = adj[v][ptr[v]]; - if (pairs[u] < 0 || - (dist[pairs[u]] > dist[v] && dfs(pairs[u]))) { - pairs[u] = v; pairs[v] = u; - return true; - }} - return false; -} - -int hopcroft_karp(int l) { // l = #Knoten links - int ans = 0; - pairs.assign(sz(adj), -1); - dist.resize(l); - // Greedy Matching, optionale Beschleunigung. - for (int v = 0; v < l; v++) for (int u : adj[v]) - if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;} - while(bfs(l)) { - ptr.assign(l, 0); - for(int v = 0; v < l; v++) { - if (pairs[v] < 0) ans += dfs(v); - }} - return ans; -} diff --git a/graph/kruskal.cpp b/graph/kruskal.cpp deleted file mode 100644 index 987d30b..0000000 --- a/graph/kruskal.cpp +++ /dev/null @@ -1,9 +0,0 @@ -sort(all(edges)); -vector mst; -ll cost = 0; -for (Edge& e : edges) { - if (findSet(e.from) != findSet(e.to)) { - unionSets(e.from, e.to); - mst.push_back(e); - cost += e.cost; -}} diff --git a/graph/matching.cpp b/graph/matching.cpp deleted file mode 100644 index 2513604..0000000 --- a/graph/matching.cpp +++ /dev/null @@ -1,23 +0,0 @@ -constexpr int MOD=1'000'000'007, I=10; -vector> adj, mat; - -int max_matching() { - int ans = 0; - mat.assign(sz(adj), {}); - for (int _ = 0; _ < I; _++) { - for (int v = 0; v < sz(adj); v++) { - mat[v].assign(sz(adj), 0); - for (int u : adj[v]) { - if (u < v) { - mat[v][u] = rand() % (MOD - 1) + 1; - mat[u][v] = MOD - mat[v][u]; - }}} - gauss(sz(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@ - int rank = 0; - for (auto& row : mat) { - if (*min_element(all(row)) != 0) rank++; - } - ans = max(ans, rank / 2); - } - return ans; -} diff --git a/graph/maxCarBiMatch.cpp b/graph/maxCarBiMatch.cpp deleted file mode 100644 index e928387..0000000 --- a/graph/maxCarBiMatch.cpp +++ /dev/null @@ -1,25 +0,0 @@ -vector> adj; -vector pairs; // Der gematchte Knoten oder -1. -vector visited; - -bool dfs(int v) { - if (visited[v]) return false; - visited[v] = true; - for (int u : adj[v]) if (pairs[u] < 0 || dfs(pairs[u])) { - pairs[u] = v; pairs[v] = u; return true; - } - return false; -} - -int kuhn(int l) { // l = #Knoten links. - pairs.assign(sz(adj), -1); - int ans = 0; - // Greedy Matching. Optionale Beschleunigung. 
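  // Note: this greedy pass only seeds an initial matching; the augmenting
  // DFS below still reaches a maximum matching, the seeding merely reduces
  // the number of augmentation rounds.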
- for (int v = 0; v < l; v++) for (int u : adj[v]) - if (pairs[u] < 0) {pairs[u] = v; pairs[v] = u; ans++; break;} - for (int v = 0; v < l; v++) if (pairs[v] < 0) { - visited.assign(l, false); - ans += dfs(v); - } - return ans; // Größe des Matchings. -} diff --git a/graph/maxWeightBipartiteMatching.cpp b/graph/maxWeightBipartiteMatching.cpp deleted file mode 100644 index a2b0a80..0000000 --- a/graph/maxWeightBipartiteMatching.cpp +++ /dev/null @@ -1,50 +0,0 @@ -double costs[N_LEFT][N_RIGHT]; - -// Es muss l<=r sein! (sonst Endlosschleife) -double match(int l, int r) { - vector lx(l), ly(r); - //xy is matching from l->r, yx from r->l, or -1 - vector xy(l, -1), yx(r, -1); - vector> slack(r); - - for (int x = 0; x < l; x++) - lx[x] = *max_element(costs[x], costs[x] + r); - for (int root = 0; root < l; root++) { - vector aug(r, -1); - vector s(l); - s[root] = true; - for (int y = 0; y < r; y++) { - slack[y] = {lx[root] + ly[y] - costs[root][y], root}; - } - int y = -1; - while (true) { - double delta = INF; - int x = -1; - for (int yy = 0; yy < r; yy++) { - if (aug[yy] < 0 && slack[yy].first < delta) { - tie(delta, x) = slack[yy]; - y = yy; - }} - if (delta > 0) { - for (int x = 0; x < l; x++) if (s[x]) lx[x] -= delta; - for (int y = 0; y < r; y++) { - if (aug[y] >= 0) ly[y] += delta; - else slack[y].first -= delta; - }} - aug[y] = x; - x = yx[y]; - if (x < 0) break; - s[x] = true; - for (int y = 0; y < r; y++) { - if (aug[y] < 0) { - double alt = lx[x] + ly[y] - costs[x][y]; - if (slack[y].first > alt) { - slack[y] = {alt, x}; - }}}} - while (y >= 0) { - yx[y] = aug[y]; - swap(y, xy[aug[y]]); - }} - return accumulate(all(lx), 0.0) + - accumulate(all(ly), 0.0); // Wert des Matchings -} diff --git a/graph/minCostMaxFlow.cpp b/graph/minCostMaxFlow.cpp deleted file mode 100644 index 14a222c..0000000 --- a/graph/minCostMaxFlow.cpp +++ /dev/null @@ -1,66 +0,0 @@ -constexpr ll INF = 1LL << 60; // Größer als der maximale Fluss. 
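// Usage sketch (comments only, not part of the original file): judging from
// the members defined below, typical use appears to be
//   MinCostFlow f(n, s, t);        // n nodes, source s, sink t
//   f.addEdge(u, v, cap, cost);    // directed edge u -> v
//   f.mincostflow();
//   // f.maxflow = flow value, f.mincost = cost of that maximum flow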
-struct MinCostFlow { - struct edge { - int to; - ll f, cost; - }; - vector edges; - vector> adj; - vector pref, con; - vector dist; - const int s, t; - ll maxflow, mincost; - - MinCostFlow(int n, int source, int target) : - adj(n), s(source), t(target) {}; - - void addEdge(int u, int v, ll c, ll cost) { - adj[u].push_back(sz(edges)); - edges.push_back({v, c, cost}); - adj[v].push_back(sz(edges)); - edges.push_back({u, 0, -cost}); - } - - bool SPFA() { - pref.assign(sz(adj), -1); - dist.assign(sz(adj), INF); - vector inqueue(sz(adj)); - queue queue; - dist[s] = 0; - queue.push(s); - pref[s] = s; - inqueue[s] = true; - while (!queue.empty()) { - int cur = queue.front(); queue.pop(); - inqueue[cur] = false; - for (int id : adj[cur]) { - int to = edges[id].to; - if (edges[id].f > 0 && - dist[to] > dist[cur] + edges[id].cost) { - dist[to] = dist[cur] + edges[id].cost; - pref[to] = cur; - con[to] = id; - if (!inqueue[to]) { - inqueue[to] = true; - queue.push(to); - }}}} - return pref[t] != -1; - } - - void extend() { - ll w = INF; - for (int u = t; pref[u] != u; u = pref[u]) - w = min(w, edges[con[u]].f); - maxflow += w; - mincost += dist[t] * w; - for (int u = t; pref[u] != u; u = pref[u]) { - edges[con[u]].f -= w; - edges[con[u] ^ 1].f += w; - }} - - void mincostflow() { - con.assign(sz(adj), 0); - maxflow = mincost = 0; - while (SPFA()) extend(); - } -}; diff --git a/graph/pushRelabel.cpp b/graph/pushRelabel.cpp deleted file mode 100644 index 904aec6..0000000 --- a/graph/pushRelabel.cpp +++ /dev/null @@ -1,64 +0,0 @@ -struct Edge { - int to, rev; - ll f, c; -}; - -vector> adj; -vector> hs; -vector ec; -vector cur, H; - -void addEdge(int u, int v, ll c) { - adj[u].push_back({v, (int)sz(adj[v]), 0, c}); - adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0}); -} - -void addFlow(Edge& e, ll f) { - if (ec[e.to] == 0 && f > 0) - hs[H[e.to]].push_back(e.to); - e.f += f; - adj[e.to][e.rev].f -= f; - ec[e.to] += f; - ec[adj[e.to][e.rev].to] -= f; -} - -ll maxFlow(int s, int t) { - int n = sz(adj); - hs.assign(2*n, {}); - ec.assign(n, 0); - cur.assign(n, 0); - H.assign(n, 0); - H[s] = n; - ec[t] = 1;//never set t to active... 
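  // Note: addFlow() only appends a node to hs[] when its excess was 0 before
  // the push, so pre-setting ec[t] = 1 keeps the sink from ever being treated
  // as an active node.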
- vector co(2*n); - co[0] = n - 1; - for (Edge& e : adj[s]) addFlow(e, e.c); - for (int hi = 0;;) { - while (hs[hi].empty()) if (!hi--) return -ec[s]; - int v = hs[hi].back(); - hs[hi].pop_back(); - while (ec[v] > 0) { - if (cur[v] == sz(adj[v])) { - H[v] = 2*n; - for (int i = 0; i < sz(adj[v]); i++) { - Edge& e = adj[v][i]; - if (e.c - e.f > 0 && - H[v] > H[e.to] + 1) { - H[v] = H[e.to] + 1; - cur[v] = i; - }} - co[H[v]]++; - if (!--co[hi] && hi < n) { - for (int i = 0; i < n; i++) { - if (hi < H[i] && H[i] < n) { - co[H[i]]--; - H[i] = n + 1; - }}} - hi = H[v]; - } else { - Edge& e = adj[v][cur[v]]; - if (e.c - e.f > 0 && H[v] == H[e.to] + 1) { - addFlow(adj[v][cur[v]], min(ec[v], e.c - e.f)); - } else { - cur[v]++; -}}}}} diff --git a/graph/reroot.cpp b/graph/reroot.cpp deleted file mode 100644 index 4c6a748..0000000 --- a/graph/reroot.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Usual Tree DP can be broken down in 4 steps: -// - Initialize dp[v] = identity -// - Iterate over all children w and take a value for w -// by looking at dp[w] and possibly the edge label of v -> w -// - combine the values of those children -// usually this operation should be commutative and associative -// - finalize the dp[v] after iterating over all children -struct Reroot { - using T = ll; - - // identity element - T E() {} - // x: dp value of child - // e: index of edge going to child - T takeChild(T x, int e) {} - T comb(T x, T y) {} - // called after combining all dp values of children - T fin(T x, int v) {} - - vector>> g; - vector ord, pae; - vector dp; - - T dfs(int v) { - ord.push_back(v); - for (auto [w, e] : g[v]) { - g[w].erase(find(all(g[w]), pair(v, e^1))); - pae[w] = e^1; - dp[v] = comb(dp[v], takeChild(dfs(w), e)); - } - return dp[v] = fin(dp[v], v); - } - - vector solve(int n, vector> edges) { - g.resize(n); - for (int i = 0; i < n-1; i++) { - g[edges[i].first].emplace_back(edges[i].second, 2*i); - g[edges[i].second].emplace_back(edges[i].first, 2*i+1); - } - pae.assign(n, -1); - dp.assign(n, E()); - dfs(0); - vector updp(n, E()), res(n, E()); - for (int v : ord) { - vector pref(sz(g[v])+1), suff(sz(g[v])+1); - if (v != 0) pref[0] = takeChild(updp[v], pae[v]); - for (int i = 0; i < sz(g[v]); i++){ - auto [u, w] = g[v][i]; - pref[i+1] = suff[i] = takeChild(dp[u], w); - pref[i+1] = comb(pref[i], pref[i+1]); - } - for (int i = sz(g[v])-1; i >= 0; i--) { - suff[i] = comb(suff[i], suff[i+1]); - } - for (int i = 0; i < sz(g[v]); i++) { - updp[g[v][i].first] = fin(comb(pref[i], suff[i+1]), v); - } - res[v] = fin(pref.back(), v); - } - return res; - } -}; diff --git a/graph/scc.cpp b/graph/scc.cpp deleted file mode 100644 index ac9a40b..0000000 --- a/graph/scc.cpp +++ /dev/null @@ -1,32 +0,0 @@ -vector> adj, sccs; -int counter; -vector inStack; -vector low, idx, s; //idx enthält Index der SCC pro Knoten. 
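// Usage sketch (comments only, not part of the original file): fill adj with
// the directed graph and call scc(); afterwards idx[v] is the component index
// of v and sccs[idx[v]] lists its members. For Tarjan's algorithm the entries
// of sccs should come out in reverse topological order of the condensation.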
- -void visit(int v) { - int old = low[v] = counter++; - s.push_back(v); inStack[v] = true; - - for (auto u : adj[v]) { - if (low[u] < 0) visit(u); - if (inStack[u]) low[v] = min(low[v], low[u]); - } - - if (old == low[v]) { - sccs.push_back({}); - for (int u = -1; u != v;) { - u = s.back(); s.pop_back(); inStack[u] = false; - idx[u] = sz(sccs) - 1; - sccs.back().push_back(u); -}}} - -void scc() { - inStack.assign(sz(adj), false); - low.assign(sz(adj), -1); - idx.assign(sz(adj), -1); - sccs.clear(); - - counter = 0; - for (int i = 0; i < sz(adj); i++) { - if (low[i] < 0) visit(i); -}} diff --git a/graph/stoerWagner.cpp b/graph/stoerWagner.cpp deleted file mode 100644 index 97e667a..0000000 --- a/graph/stoerWagner.cpp +++ /dev/null @@ -1,53 +0,0 @@ -struct Edge { - int from, to; - ll cap; -}; - -vector> adj, tmp; -vector erased; - -void merge(int u, int v) { - tmp[u].insert(tmp[u].end(), all(tmp[v])); - tmp[v].clear(); - erased[v] = true; - for (auto& vec : tmp) { - for (Edge& e : vec) { - if (e.from == v) e.from = u; - if (e.to == v) e.to = u; -}}} - -ll stoer_wagner() { - ll res = INF; - tmp = adj; - erased.assign(sz(tmp), false); - for (int i = 1; i < sz(tmp); i++) { - int s = 0; - while (erased[s]) s++; - priority_queue> pq; - pq.push({0, s}); - vector con(sz(tmp)); - ll cur = 0; - vector> state; - while (!pq.empty()) { - int c = pq.top().second; - pq.pop(); - if (con[c] < 0) continue; //already seen - con[c] = -1; - for (auto e : tmp[c]) { - if (con[e.to] >= 0) {//add edge to cut - con[e.to] += e.cap; - pq.push({con[e.to], e.to}); - cur += e.cap; - } else if (e.to != c) {//remove edge from cut - cur -= e.cap; - }} - state.push_back({cur, c}); - } - int t = state.back().second; - state.pop_back(); - if (state.empty()) return 0; //graph is not connected?! - merge(state.back().second, t); - res = min(res, state.back().first); - } - return res; -} diff --git a/graph/treeIsomorphism.cpp b/graph/treeIsomorphism.cpp deleted file mode 100644 index 4e9ddce..0000000 --- a/graph/treeIsomorphism.cpp +++ /dev/null @@ -1,15 +0,0 @@ -vector> adj; -map, int> known; - -int treeLabel(int v, int from = -1) { - vector children; - for (int u : adj[v]) { - if (u == from) continue; - children.push_back(treeLabel(u, v)); - } - sort(all(children)); - if (known.find(children) == known.end()) { - known[children] = sz(known); - } - return known[children]; -} diff --git a/graph/virtualTree.cpp b/graph/virtualTree.cpp deleted file mode 100644 index 2fcea80..0000000 --- a/graph/virtualTree.cpp +++ /dev/null @@ -1,22 +0,0 @@ -// needs dfs in- and out- time and lca function -vector in, out; - -void virtualTree(vector ind) { // indices of used nodes - sort(all(ind), [&](int x, int y) {return in[x] < in[y];}); - for (int i=0; i> tree(n); - vector st = {0}; - for (int i=1; i= out[ind[st.back()]]) st.pop_back(); - tree[st.back()].push_back(i); - st.push(i); - } - // virtual directed tree with n nodes, original indices in ind - // weights can be calculated, e.g. with binary lifting -} diff --git a/latexHeaders/code.sty b/latexHeaders/code.sty deleted file mode 100644 index a889596..0000000 --- a/latexHeaders/code.sty +++ /dev/null @@ -1,125 +0,0 @@ -% Colors, used for syntax highlighting. -% To print this document, set all colors to black! -\usepackage{xcolor} -\definecolor{safeRed}{HTML}{D7191C} -\definecolor{safeOrange}{HTML}{FFDE71} -\definecolor{safeYellow}{HTML}{FFFFBF} -\definecolor{safeGreen}{HTML}{99CF8F} -\definecolor{safeBlue}{HTML}{2B83BA} - -%try printer friendly colors? 
-%\colorlet{keyword}{safeBlue} -%\colorlet{string}{safeRed} -%\colorlet{comment}{safeGreen} -%\colorlet{identifier}{black} -\definecolor{type}{HTML}{2750A0} -\definecolor{string}{HTML}{7B3294} -\definecolor{comment}{HTML}{1A9641} -\definecolor{identifier}{HTML}{000000} -\definecolor{keyword}{HTML}{900000} - -% Source code listings. -\usepackage[scaled=0.80]{beramono} - -\usepackage{listings} -\lstset{ - language={[11]C++}, - numbers=left, - stepnumber=1, - numbersep=6pt, - numberstyle=\small, - breaklines=true, - breakautoindent=true, - breakatwhitespace=false, - numberblanklines=true, - postbreak=\space, - tabsize=2, - upquote=true, - basicstyle=\ttfamily\normalsize, - showspaces=false, - showstringspaces=false, - extendedchars=true, - keywordstyle=\color{keyword}\bfseries, - stringstyle=\color{string}\bfseries, - commentstyle=\color{comment}\bfseries\itshape, - identifierstyle=\color{identifier}, - directivestyle=\color{keyword}\bfseries, - emph={auto, int, long, long long, float, double, long double, char, bool, void, ll, ld, pt, lll, __int128, __float128, true, false, this, nullptr, INF, inf, EPS, eps}, - emphstyle=\color{type}\bfseries, - frame=trbl, - aboveskip=3pt, - belowskip=3pt, - deletestring=[b]{'},%fix digit separator but break char highlighting (fixed again with literate) - escapechar=@ - %moredelim=**[is][{\btHL[fill=green!30,draw=red,dashed,thin]}]{@}{@} -} - -\newcommand{\formatChar}[1]{{\color{string}\bfseries\textquotesingle{}#1\textquotesingle{}}} - -% Listings doesn't support UTF8. This is just enough for German umlauts. and commonly used chars -\lstset{literate=% - {'a'}{{\formatChar{a}}}3 - {'z'}{{\formatChar{z}}}3 - {'A'}{{\formatChar{A}}}3 - {'Z'}{{\formatChar{Z}}}3 - {'0'}{{\formatChar{0}}}3 - {'1'}{{\formatChar{1}}}3 - {'\$'}{{\formatChar{\$}}}3 - {'\#'}{{\formatChar{\#}}}3 - {Ö}{{\"O}}1 - {Ä}{{\"A}}1 - {Ü}{{\"U}}1 - {ß}{{\ss}}1 - {ü}{{\"u}}1 - {ä}{{\"a}}1 - {ö}{{\"o}}1 - {~}{{\textasciitilde}}1 -} - -\makeatletter -\let\orig@lstnumber=\thelstnumber -\newcommand\lstresetnumber{\global\let\thelstnumber=\orig@lstnumber} -\let\orig@placelstnumber=\lst@PlaceNumber -\gdef\lst@PlaceNumber{\orig@placelstnumber\lstresetnumber} -\newcommand\lstsettmpnumber[1]{\gdef\thelstnumber{#1}} - -\lst@AddToHook{OnEmptyLine}{% - \ifnum\value{lstnumber}>99 - \lstsettmpnumber{\_\_\_} - \else\ifnum\value{lstnumber}>9 - \lstsettmpnumber{\_\_} - \else - \lstsettmpnumber{\_} - \fi\fi -% \lstsettmpnumber{\_\_\kern-6pt}% - \vspace{-1.75ex}% - \addtocounter{lstnumber}{-1}% -} -% old: (change numberblanklines=false!) 
-%\lst@AddToHook{OnEmptyLine}{% -% \vspace{\dimexpr\baselineskip+0.5em}% -% \addtocounter{lstnumber}{-1}% -%} - -\newenvironment{btHighlight}[1][] -{\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}} -{\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup} - -\newcommand\btHL[1][]{% - \begin{btHighlight}[#1]\bgroup\aftergroup\bt@HL@endenv% - } - \def\bt@HL@endenv{% - \end{btHighlight}% - \egroup% -} -\newcommand{\bt@HL@box}[2][]{% - \tikz[#1]{% - \pgfpathrectangle{\pgfpoint{1pt}{0pt}}{\pgfpoint{\wd #2}{\ht #2}}% - \pgfusepath{use as bounding box}% - \node[anchor=base west, fill=orange!30,outer sep=0pt,inner xsep=2.2pt, inner ysep=0pt, rounded corners=3pt, minimum height=\ht\strutbox+1pt,#1]{\raisebox{1pt}{\strut}\strut\usebox{#2}}; - }% -} -\makeatother - -\newcommand{\hl}[1]{\btHL[fill=safeOrange,draw=black,thin]{#1}} - diff --git a/latexHeaders/commands.sty b/latexHeaders/commands.sty deleted file mode 100644 index edbba1b..0000000 --- a/latexHeaders/commands.sty +++ /dev/null @@ -1,56 +0,0 @@ -% custom commands -\newcommand{\optional}[1]{ - \ifoptional - #1 - \fi} -\newcommand{\runtime}[1]{\ensuremath{\mathcal{O}\left(#1\right)}} -\newcommand{\code}[1]{\lstinline[breaklines=true]{#1}} -\let\codeSafe\lstinline - -\usepackage{tikz} -\usetikzlibrary{angles,quotes} - - -%new environment to define algorithms -\usepackage{ifthen} -\NewDocumentEnvironment{algorithm}{ O{required} m +b }{}{ - \ifthenelse{\equal{#1}{optional}}{% - \optional{ - \needspace{4\baselineskip}% - \subsection{#2\textcolor{gray}{(optional)}}% - #3% - } - }{% - \needspace{4\baselineskip}% - \subsection{#2}% - #3% - } -} - -%\ifthenelse{\equal{#3}{}}{}{\runtime{#3}} - -\newcommand{\sourcecode}[1]{% - \label{code:#1}% - \nobreak% -% \needspace{3\baselineskip}% -% \nopagebreak% - \lstinputlisting{#1}% - \penalty -1000% -} -\newcommand{\sourceref}[1]{{% - \color{comment}\bfseries\itshape{}Seite \pageref{code:#1}% -}} - -\newcommand{\method}[4][]{\texttt{#2}~~#3~~\runtime{#4}#1\par} - -\newenvironment{methods}[1][lll]{% - %\begin{minipage}{\linewidth}% - \renewcommand{\method}[4][]{\texttt{##2}&##3&\ifthenelse{\equal{##4}{}}{}{\runtime{##4}}##1\\}% - \begin{tabular}{@{}#1@{}}% -}{% - \end{tabular}% - %\end{minipage}% - \nobreak% - \needspace{3\baselineskip}% - \nobreak% -} diff --git a/latexHeaders/layout.sty b/latexHeaders/layout.sty deleted file mode 100644 index 096cf23..0000000 --- a/latexHeaders/layout.sty +++ /dev/null @@ -1,82 +0,0 @@ -% Don't waste space at the page borders. Use two column layout. -\usepackage[ - top=2cm, - bottom=1cm, - left=1cm, - right=1cm, - landscape -]{geometry} - -% Headline and bottomline. -\usepackage{scrlayer-scrpage} -\pagestyle{scrheadings} -\clearscrheadfoot -\ihead{\university} -\chead{\teamname} -\ohead{\pagemark} - -% Shift the title up to waste less space. -\usepackage{titling} -\setlength{\droptitle}{-8em} - -% Multicol layout for the table of contents. -\usepackage{multicol} -\usepackage{multirow} -\usepackage{array} - -% Automatically have table fill horizontal space. -\usepackage{makecell} -\usepackage{tabularx} -\newcolumntype{C}{>{\centering\arraybackslash}X} -\newcolumntype{L}{>{\raggedright\arraybackslash}X} -\newcolumntype{R}{>{\raggedleft\arraybackslash}X} -\newcolumntype{I}{!{\color{lightgray}\vrule}} -\usepackage{colortbl} -\newcommand{\grayhline}{\arrayrulecolor{lightgray}\hline - \arrayrulecolor{black}} - -% Nice table line. -\usepackage{booktabs} - -% Dingbats symbols. -\usepackage{pifont} - -% use less space... 
-%\usepackage[subtle, sections, indent, leading, charwidths]{savetrees} -\usepackage[moderate,sections]{savetrees} -\RedeclareSectionCommands[ - beforeskip=1pt plus 5pt, - afterskip=0.1pt plus 1.5pt -]{section,subsection,subsubsection} -\RedeclareSectionCommands[ - beforeskip=1pt plus 5pt, - afterskip=-1.2ex -]{paragraph} - -% dont indent paragagraphs -\setlength{\parindent}{0em} -\parskip=0pt - -% dont encourage breaks before lists -\@beginparpenalty=10000 - -% Nice enumerations without wasting space above and below. -\usepackage{relsize} -\usepackage{enumitem} -\setlist{nosep,leftmargin=2ex,labelwidth=1ex,labelsep=1ex} -\setlist[2]{leftmargin=3ex,label=\smaller[2]\ding{228}} -\setlist[3]{leftmargin=3ex,label=\larger\textbf{--}} -\setlist[description]{leftmargin=0pt} - -% decrease space for tables -\tabcolsep=2pt -\setlength\extrarowheight{0.3pt plus 1pt} - -\newenvironment{expandtable}{% - \begin{addmargin}{-3.4pt} -}{% - \end{addmargin} -} - -\usepackage{needspace} -\usepackage{setspace} diff --git a/latexHeaders/math.sty b/latexHeaders/math.sty deleted file mode 100644 index c34cc99..0000000 --- a/latexHeaders/math.sty +++ /dev/null @@ -1,98 +0,0 @@ -% For Headlines with math -\usepackage{bm} - -% Display math. -\usepackage{amsmath} -\usepackage{mathtools} -\usepackage{amssymb} -\usepackage{ntheorem} - -%\usepackage{pxfonts} -\usepackage[scaled=0.945,largesc,looser]{newpxtext}%better than pxfonts... -\usepackage[scaled=0.945,bigdelims]{newpxmath} -\let\mathbb\vmathbb - -\DeclareFontFamily{LMX}{npxexx}{} -\DeclareFontShape{LMX}{npxexx}{m}{n}{<-> s * [1.045] zplexx}{} -\DeclareFontShape{LMX}{npxexx}{b}{n}{<-> s * [1.045] zplbexx}{} -%\DeclareFontShape{LMX}{npxexx}{m}{n}{<-> s * [0.78] zplexx}{} -%\DeclareFontShape{LMX}{npxexx}{b}{n}{<-> s * [0.78] zplbexx}{} -\DeclareFontShape{LMX}{npxexx}{bx}{n}{<->ssub * npxexx/b/n}{} - -%\usepackage[scaled=0.91]{XCharter} -%\usepackage[scaled=0.89,type1]{cabin}% sans serif -%\usepackage[charter,varbb,scaled=1.00,noxchvw]{newtxmath} - -%\usepackage{libertine} -%\usepackage[libertine]{newtxmath} - -% New enviroment for remarks. -\theoremstyle{break} -\newtheorem{bem}{Bemerkung} - -% New commands for math operators. -% Binomial coefficients. -\renewcommand{\binom}[2]{ - \Bigl( - \begin{matrix} - #1 \\ - #2 - \end{matrix} - \Bigr) -} -% Euler numbers, first kind. -\newcommand{\eulerI}[2]{ - \Bigl\langle - \begin{matrix} - #1 \\ - #2 - \end{matrix} - \Bigr\rangle -} -% Euler numbers, second kind. -\newcommand{\eulerII}[2]{ - \Bigl\langle\mkern-4mu\Bigl\langle - \begin{matrix} - #1 \\ - #2 - \end{matrix} - \Bigr\rangle\mkern-4mu\Bigr\rangle -} -% Stirling numbers, first kind. -\newcommand{\stirlingI}[2]{ - \Bigl[ - \begin{matrix} - #1 \\ - #2 - \end{matrix} - \Bigr] -} -% Stirling numbers, second kind. -\newcommand{\stirlingII}[2]{ - \Bigl\{ - \begin{matrix} - #1 \\ - #2 - \end{matrix} - \Bigr\} -} -% Legendre symbol. -\newcommand{\legendre}[2]{ - \Bigl( - \dfrac{#1}{#2} - \Bigr) -} -% Expectation values. -\newcommand{\E}{\text{E}} -% Greates common divisor. 
-\newcommand{\ggT}{\text{ggT}} -% sign for negative values -\newcommand{\sign}{\scalebox{0.66}[1.0]{\( - \)}} -% absolute values -\newcommand{\abs}[1]{\left|#1\right|} -% ceiling function -\newcommand{\ceil}[1]{\left\lceil#1\right\rceil} -% floor function -\newcommand{\floor}[1]{\left\lfloor#1\right\rfloor} -% multiplication -\renewcommand{\*}{\ensuremath{\cdotp}} diff --git a/math/berlekampMassey.cpp b/math/berlekampMassey.cpp deleted file mode 100644 index 29e084f..0000000 --- a/math/berlekampMassey.cpp +++ /dev/null @@ -1,31 +0,0 @@ -constexpr ll mod = 1'000'000'007; -vector BerlekampMassey(const vector& s) { - int n = sz(s), L = 0, m = 0; - vector C(n), B(n), T; - C[0] = B[0] = 1; - - ll b = 1; - for (int i = 0; i < n; i++) { - m++; - ll d = s[i] % mod; - for (int j = 1; j <= L; j++) { - d = (d + C[j] * s[i - j]) % mod; - } - if (!d) continue; - T = C; - ll coef = d * powMod(b, mod-2, mod) % mod; - for (int j = m; j < n; j++) { - C[j] = (C[j] - coef * B[j - m]) % mod; - } - if (2 * L > i) continue; - L = i + 1 - L; - swap(B, T); - b = d; - m = 0; - } - - C.resize(L + 1); - C.erase(C.begin()); - for (auto& x : C) x = (mod - x) % mod; - return C; -} diff --git a/math/bigint.cpp b/math/bigint.cpp deleted file mode 100644 index 6f83a93..0000000 --- a/math/bigint.cpp +++ /dev/null @@ -1,275 +0,0 @@ -// base and base_digits must be consistent -constexpr ll base = 1'000'000; -constexpr ll base_digits = 6; -struct bigint { - vll a; ll sign; - - bigint() : sign(1) {} - - bigint(ll v) {*this = v;} - - bigint(const string &s) {read(s);} - - void operator=(const bigint& v) { - sign = v.sign; - a = v.a; - } - - void operator=(ll v) { - sign = 1; - if (v < 0) sign = -1, v = -v; - a.clear(); - for (; v > 0; v = v / base) - a.push_back(v % base); - } - - bigint operator+(const bigint& v) const { - if (sign == v.sign) { - bigint res = v; - for (ll i = 0, carry = 0; i < max(sz(a), sz(v.a)) || carry; ++i) { - if (i == sz(res.a)) - res.a.push_back(0); - res.a[i] += carry + (i < sz(a) ? a[i] : 0); - carry = res.a[i] >= base; - if (carry) - res.a[i] -= base; - } - return res; - } - return *this - (-v); - } - - bigint operator-(const bigint& v) const { - if (sign == v.sign) { - if (abs() >= v.abs()) { - bigint res = *this; - for (ll i = 0, carry = 0; i < sz(v.a) || carry; ++i) { - res.a[i] -= carry + (i < sz(v.a) ? v.a[i] : 0); - carry = res.a[i] < 0; - if (carry) res.a[i] += base; - } - res.trim(); - return res; - } - return -(v - *this); - } - return *this + (-v); - } - - void operator*=(ll v) { - if (v < 0) sign = -sign, v = -v; - for (ll i = 0, carry = 0; i < sz(a) || carry; ++i) { - if (i == sz(a)) a.push_back(0); - ll cur = a[i] * v + carry; - carry = cur / base; - a[i] = cur % base; - } - trim(); - } - - bigint operator*(ll v) const { - bigint res = *this; - res *= v; - return res; - } - - friend pair divmod(const bigint& a1, const bigint& b1) { - ll norm = base / (b1.a.back() + 1); - bigint a = a1.abs() * norm; - bigint b = b1.abs() * norm; - bigint q, r; - q.a.resize(sz(a.a)); - for (ll i = sz(a.a) - 1; i >= 0; i--) { - r *= base; - r += a.a[i]; - ll s1 = sz(r.a) <= sz(b.a) ? 0 : r.a[sz(b.a)]; - ll s2 = sz(r.a) <= sz(b.a) - 1 ? 
0 : r.a[sz(b.a) - 1]; - ll d = (base * s1 + s2) / b.a.back(); - r -= b * d; - while (r < 0) r += b, --d; - q.a[i] = d; - } - q.sign = a1.sign * b1.sign; - r.sign = a1.sign; - q.trim(); - r.trim(); - return make_pair(q, r / norm); - } - - bigint operator/(const bigint& v) const { - return divmod(*this, v).first; - } - - bigint operator%(const bigint& v) const { - return divmod(*this, v).second; - } - - void operator/=(ll v) { - if (v < 0) sign = -sign, v = -v; - for (ll i = sz(a) - 1, rem = 0; i >= 0; --i) { - ll cur = a[i] + rem * base; - a[i] = cur / v; - rem = cur % v; - } - trim(); - } - - bigint operator/(ll v) const { - bigint res = *this; - res /= v; - return res; - } - - ll operator%(ll v) const { - if (v < 0) v = -v; - ll m = 0; - for (ll i = sz(a) - 1; i >= 0; --i) - m = (a[i] + m * base) % v; - return m * sign; - } - - void operator+=(const bigint& v) { - *this = *this + v; - } - void operator-=(const bigint& v) { - *this = *this - v; - } - void operator*=(const bigint& v) { - *this = *this * v; - } - void operator/=(const bigint& v) { - *this = *this / v; - } - - bool operator<(const bigint& v) const { - if (sign != v.sign) return sign < v.sign; - if (sz(a) != sz(v.a)) - return sz(a) * sign < sz(v.a) * v.sign; - for (ll i = sz(a) - 1; i >= 0; i--) - if (a[i] != v.a[i]) - return a[i] * sign < v.a[i] * sign; - return false; - } - - bool operator>(const bigint& v) const { - return v < *this; - } - bool operator<=(const bigint& v) const { - return !(v < *this); - } - bool operator>=(const bigint& v) const { - return !(*this < v); - } - bool operator==(const bigint& v) const { - return !(*this < v) && !(v < *this); - } - bool operator!=(const bigint& v) const { - return *this < v || v < *this; - } - - void trim() { - while (!a.empty() && !a.back()) a.pop_back(); - if (a.empty()) sign = 1; - } - - bool isZero() const { - return a.empty() || (sz(a) == 1 && a[0] == 0); - } - - bigint operator-() const { - bigint res = *this; - res.sign = -sign; - return res; - } - - bigint abs() const { - bigint res = *this; - res.sign *= res.sign; - return res; - } - - ll longValue() const { - ll res = 0; - for (ll i = sz(a) - 1; i >= 0; i--) - res = res * base + a[i]; - return res * sign; - } - - void read(const string& s) { - sign = 1; - a.clear(); - ll pos = 0; - while (pos < sz(s) && (s[pos] == '-' || s[pos] == '+')) { - if (s[pos] == '-') sign = -sign; - ++pos; - } - for (ll i = sz(s) - 1; i >= pos; i -= base_digits) { - ll x = 0; - for (ll j = max(pos, i - base_digits + 1); j <= i; j++) - x = x * 10 + s[j] - '0'; - a.push_back(x); - } - trim(); - } - - friend istream& operator>>(istream& stream, bigint& v) { - string s; - stream >> s; - v.read(s); - return stream; - } - - friend ostream& operator<<(ostream& stream, const bigint& v) { - if (v.sign == -1) stream << '-'; - stream << (v.a.empty() ? 
0 : v.a.back()); - for (ll i = sz(v.a) - 2; i >= 0; --i) - stream << setw(base_digits) << setfill('0') << v.a[i]; - return stream; - } - - static vll karatsubaMultiply(const vll& a, const vll& b) { - ll n = sz(a); - vll res(n + n); - if (n <= 32) { - for (ll i = 0; i < n; i++) - for (ll j = 0; j < n; j++) - res[i + j] += a[i] * b[j]; - return res; - } - ll k = n >> 1; - vll a1(a.begin(), a.begin() + k); - vll a2(a.begin() + k, a.end()); - vll b1(b.begin(), b.begin() + k); - vll b2(b.begin() + k, b.end()); - vll a1b1 = karatsubaMultiply(a1, b1); - vll a2b2 = karatsubaMultiply(a2, b2); - for (ll i = 0; i < k; i++) a2[i] += a1[i]; - for (ll i = 0; i < k; i++) b2[i] += b1[i]; - vll r = karatsubaMultiply(a2, b2); - for (ll i = 0; i < sz(a1b1); i++) r[i] -= a1b1[i]; - for (ll i = 0; i < sz(a2b2); i++) r[i] -= a2b2[i]; - for (ll i = 0; i < sz(r); i++) res[i + k] += r[i]; - for (ll i = 0; i < sz(a1b1); i++) res[i] += a1b1[i]; - for (ll i = 0; i < sz(a2b2); i++) res[i + n] += a2b2[i]; - return res; - } - - bigint operator*(const bigint& v) const { - vll a(this->a.begin(), this->a.end()); - vll b(v.a.begin(), v.a.end()); - while (sz(a) < sz(b)) a.push_back(0); - while (sz(b) < sz(a)) b.push_back(0); - while (sz(a) & (sz(a) - 1)) - a.push_back(0), b.push_back(0); - vll c = karatsubaMultiply(a, b); - bigint res; - res.sign = sign * v.sign; - for (ll i = 0, carry = 0; i < sz(c); i++) { - ll cur = c[i] + carry; - res.a.push_back(cur % base); - carry = cur / base; - } - res.trim(); - return res; - } -}; diff --git a/math/binomial0.cpp b/math/binomial0.cpp deleted file mode 100644 index 896a0f1..0000000 --- a/math/binomial0.cpp +++ /dev/null @@ -1,14 +0,0 @@ -constexpr ll lim = 10'000'000; -ll fac[lim], inv[lim]; - -void precalc() { - fac[0] = inv[0] = 1; - for (int i = 1; i < lim; i++) fac[i] = fac[i-1] * i % mod; - inv[lim - 1] = multInv(fac[lim - 1], mod); - for (int i = lim - 1; i > 0; i--) inv[i-1] = inv[i] * i % mod; -} - -ll calc_binom(ll n, ll k) { - if (n < 0 || n < k || k < 0) return 0; - return (inv[n] * inv[n-k] % mod) * fac[k] % mod; -} diff --git a/math/binomial1.cpp b/math/binomial1.cpp deleted file mode 100644 index dab20b3..0000000 --- a/math/binomial1.cpp +++ /dev/null @@ -1,8 +0,0 @@ -ll calc_binom(ll n, ll k) { - if (k > n) return 0; - ll r = 1; - for (ll d = 1; d <= k; d++) {// Reihenfolge => Teilbarkeit - r *= n--, r /= d; - } - return r; -} diff --git a/math/binomial2.cpp b/math/binomial2.cpp deleted file mode 100644 index 4531505..0000000 --- a/math/binomial2.cpp +++ /dev/null @@ -1,32 +0,0 @@ -constexpr ll mod = 1'000'000'009; - -ll binomPPow(ll n, ll k, ll p) { - ll res = 1; - if (p > n) { - } else if (p > n - k || (p * p > n && n % p < k % p)) { - res *= p; - res %= mod; - } else if (p * p <= n) { - ll c = 0, tmpN = n, tmpK = k; - while (tmpN > 0) { - if (tmpN % p < tmpK % p + c) { - res *= p; - res %= mod; - c = 1; - } else c = 0; - tmpN /= p; - tmpK /= p; - }} - return res; -} - -ll calc_binom(ll n, ll k) { - if (k > n) return 0; - ll res = 1; - k = min(k, n - k); - for (ll i = 0; primes[i] <= n; i++) { - res *= binomPPow(n, k, primes[i]); - res %= mod; - } - return res; -} diff --git a/math/binomial3.cpp b/math/binomial3.cpp deleted file mode 100644 index f52337c..0000000 --- a/math/binomial3.cpp +++ /dev/null @@ -1,10 +0,0 @@ -ll calc_binom(ll n, ll k, ll p) { - assert(n < p) //wichtig: sonst falsch! - if (k > n) return 0; - ll x = k % 2 != 0 ? 
p-1 : 1; - for (ll c = p-1; c > n; c--) { - x *= c - k; x %= p; - x *= multInv(c, p); x %= p; - } - return x; -} diff --git a/math/chineseRemainder.cpp b/math/chineseRemainder.cpp deleted file mode 100644 index ccbc5dc..0000000 --- a/math/chineseRemainder.cpp +++ /dev/null @@ -1,14 +0,0 @@ -struct CRT { - using lll = __int128; - lll M = 1, sol = 0; // Solution unique modulo M - bool hasSol = true; - - // Adds congruence x = a (mod m) - void add(ll a, ll m) { - auto [d, s, t] = extendedEuclid(M, m); - if((a - sol) % d != 0) hasSol = false; - lll z = M/d * s; - M *= m/d; - sol = (z % M * (a-sol) % M + sol + M) % M; - } -}; diff --git a/math/cycleDetection.cpp b/math/cycleDetection.cpp deleted file mode 100644 index 621af82..0000000 --- a/math/cycleDetection.cpp +++ /dev/null @@ -1,16 +0,0 @@ -void cycleDetection(ll x0, function f) { - ll a = x0, b = f(x0), length = 1; - for (ll power = 1; a != b; b = f(b), length++) { - if (power == length) { - power *= 2; - length = 0; - a = b; - }} - ll start = 0; - a = x0; b = x0; - for (ll i = 0; i < length; i++) b = f(b); - while (a != b) { - a = f(a); - b = f(b); - start++; -}} diff --git a/math/discreteLogarithm.cpp b/math/discreteLogarithm.cpp deleted file mode 100644 index d9227b9..0000000 --- a/math/discreteLogarithm.cpp +++ /dev/null @@ -1,14 +0,0 @@ -ll dlog(ll a, ll b, ll m) { - ll bound = sqrtl(m) + 1; //memory usage bound - map vals; - for (ll i = 0, e = 1; i < bound; i++, e = (e * a) % m) { - vals[e] = i; - } - ll fact = powMod(a, m - bound - 1, m); - - for (ll i = 0; i < m; i += bound, b = (b * fact) % m) { - if (vals.count(b)) { - return i + vals[b]; - }} - return -1; -} diff --git a/math/discreteNthRoot.cpp b/math/discreteNthRoot.cpp deleted file mode 100644 index 7201b2b..0000000 --- a/math/discreteNthRoot.cpp +++ /dev/null @@ -1,5 +0,0 @@ -ll root(ll a, ll b, ll m) { - ll g = findPrimitive(m); - ll c = dlog(powMod(g, a, m), b, m); //dLog @\sourceref{math/discreteLogarithm.cpp}@ - return c < 0 ? -1 : powMod(g, c, m); -} diff --git a/math/divisors.cpp b/math/divisors.cpp deleted file mode 100644 index 5afd4fb..0000000 --- a/math/divisors.cpp +++ /dev/null @@ -1,11 +0,0 @@ -ll countDivisors(ll n) { - ll res = 1; - for (ll i = 2; i * i * i <= n; i++) { - ll c = 0; - while (n % i == 0) {n /= i; c++;} - res *= c + 1; - } - if (isPrime(n)) res *= 2; - else if (n > 1) res *= isSquare(n) ? 3 : 4; - return res; -} diff --git a/math/extendedEuclid.cpp b/math/extendedEuclid.cpp deleted file mode 100644 index ecf4a16..0000000 --- a/math/extendedEuclid.cpp +++ /dev/null @@ -1,6 +0,0 @@ -// a*x + b*y = ggt(a, b) -array extendedEuclid(ll a, ll b) { - if (a == 0) return {b, 0, 1}; - auto [d, x, y] = extendedEuclid(b % a, a); - return {d, y - (b / a) * x, x}; -} diff --git a/math/gauss.cpp b/math/gauss.cpp deleted file mode 100644 index 3e3b7aa..0000000 --- a/math/gauss.cpp +++ /dev/null @@ -1,36 +0,0 @@ -void normalLine(int line) { - double factor = mat[line][line]; - for (double& x : mat[line]) x /= factor; -} - -void takeAll(int n, int line) { - for (int i = 0; i < n; i++) { - if (i == line) continue; - double diff = mat[i][line]; - for (int j = 0; j <= n; j++) { - mat[i][j] -= diff * mat[line][j]; -}}} - -int gauss(int n) { - vector done(n, false); - for (int i = 0; i < n; i++) { - int swappee = i; // Sucht Pivotzeile für bessere Stabilität. 
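    // Note: the loop below implements partial pivoting: it swaps in a row
    // with a large entry in column i so that normalLine() never divides by a
    // value close to zero.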
- for (int j = 0; j < n; j++) { - if (done[j]) continue; - if (abs(mat[j][i]) > abs(mat[i][i])) swappee = j; - } - swap(mat[i], mat[swappee]); - if (abs(mat[i][i]) > EPS) { - normalLine(i); - takeAll(n, i); - done[i] = true; - }} - // Ab jetzt nur checks bzgl. Eindeutigkeit/Existenz der Lösung. - for (int i = 0; i < n; i++) { - bool allZero = true; - for (int j = i; j < n; j++) allZero &= abs(mat[i][j]) <= EPS; - if (allZero && abs(mat[i][n]) > EPS) return INCONSISTENT; - if (allZero && abs(mat[i][n]) <= EPS) return MULTIPLE; - } - return UNIQUE; -} diff --git a/math/gcd-lcm.cpp b/math/gcd-lcm.cpp deleted file mode 100644 index a1c63c8..0000000 --- a/math/gcd-lcm.cpp +++ /dev/null @@ -1,2 +0,0 @@ -ll gcd(ll a, ll b) {return b == 0 ? a : gcd(b, a % b);} -ll lcm(ll a, ll b) {return a * (b / gcd(a, b));} diff --git a/math/goldenSectionSearch.cpp b/math/goldenSectionSearch.cpp deleted file mode 100644 index 20b15e8..0000000 --- a/math/goldenSectionSearch.cpp +++ /dev/null @@ -1,15 +0,0 @@ -ld gss(ld l, ld r, function f) { - ld inv = (sqrt(5.0l) - 1) / 2; - ld x1 = r - inv*(r-l), x2 = l + inv*(r-l); - ld f1 = f(x1), f2 = f(x2); - for (int i = 0; i < 200; i++) { - if (f1 < f2) { //change to > to find maximum - u = x2; x2 = x1; f2 = f1; - x1 = r - inv*(r-l); f1 = f(x1); - } else { - l = x1; x1 = x2; f1 = f2; - x2 = l + inv*(r-l); f2 = f(x2); - } - } - return l; -} diff --git a/math/inversions.cpp b/math/inversions.cpp deleted file mode 100644 index 9e47f9b..0000000 --- a/math/inversions.cpp +++ /dev/null @@ -1,9 +0,0 @@ -ll inversions(const vector& v) { - Tree> t; //ordered statistics tree @\sourceref{datastructures/pbds.cpp}@ - ll res = 0; - for (ll i = 0; i < sz(v); i++) { - res += i - t.order_of_key({v[i], i}); - t.insert({v[i], i}); - } - return res; -} diff --git a/math/inversionsMerge.cpp b/math/inversionsMerge.cpp deleted file mode 100644 index 8235b11..0000000 --- a/math/inversionsMerge.cpp +++ /dev/null @@ -1,27 +0,0 @@ -// Laufzeit: O(n*log(n)) -ll merge(vector& v, vector& left, vector& right) { - int a = 0, b = 0, i = 0; - ll inv = 0; - while (a < sz(left) && b < sz(right)) { - if (left[a] < right[b]) v[i++] = left[a++]; - else { - inv += sz(left) - a; - v[i++] = right[b++]; - } - } - while (a < sz(left)) v[i++] = left[a++]; - while (b < sz(right)) v[i++] = right[b++]; - return inv; -} - -ll mergeSort(vector &v) { // Sortiert v und gibt Inversionszahl zurück. - int n = sz(v); - vector left(n / 2), right((n + 1) / 2); - for (int i = 0; i < n / 2; i++) left[i] = v[i]; - for (int i = n / 2; i < n; i++) right[i - n / 2] = v[i]; - - ll result = 0; - if (sz(left) > 1) result += mergeSort(left); - if (sz(right) > 1) result += mergeSort(right); - return result + merge(v, left, right); -} diff --git a/math/kthperm.cpp b/math/kthperm.cpp deleted file mode 100644 index 899dff1..0000000 --- a/math/kthperm.cpp +++ /dev/null @@ -1,14 +0,0 @@ -vector kthperm(ll k, ll n) { - Tree t; - vector res(n); - for (ll i = 1; i <= n; k /= i, i++) { - t.insert(i - 1); - res[n - i] = k % i; - } - for (ll& x : res) { - auto it = t.find_by_order(x); - x = *it; - t.erase(it); - } - return res; -} diff --git a/math/legendre.cpp b/math/legendre.cpp deleted file mode 100644 index f08755f..0000000 --- a/math/legendre.cpp +++ /dev/null @@ -1,4 +0,0 @@ -ll legendre(ll a, ll p) { - ll s = powMod(a, p / 2, p); - return s < 2 ? 
s : -1ll; -} diff --git a/math/lgsFp.cpp b/math/lgsFp.cpp deleted file mode 100644 index 7081fea..0000000 --- a/math/lgsFp.cpp +++ /dev/null @@ -1,26 +0,0 @@ -void normalLine(int line, ll p) { - ll factor = multInv(mat[line][line], p); - for (ll& x : mat[line]) x = (x * factor) % p; -} - -void takeAll(int n, int line, ll p) { - for (int i = 0; i < n; i++) { - if (i == line) continue; - ll diff = mat[i][line]; - for (int j = 0; j <= n; j++) { - mat[i][j] -= (diff * mat[line][j]) % p; - mat[i][j] = (mat[i][j] + p) % p; -}}} - -void gauss(int n, ll mod) { - vector done(n, false); - for (int i = 0; i < n; i++) { - int j = 0; - while (j < n && (done[j] || mat[j][i] == 0)) j++; - if (j == n) continue; - swap(mat[i], mat[j]); - normalLine(i, mod); - takeAll(n, i, mod); - done[i] = true; -}} -// für Eindeutigkeit, Existenz etc. siehe LGS über R diff --git a/math/linearCongruence.cpp b/math/linearCongruence.cpp deleted file mode 100644 index cdb5a37..0000000 --- a/math/linearCongruence.cpp +++ /dev/null @@ -1,5 +0,0 @@ -ll solveLinearCongruence(ll a, ll b, ll m) { - ll g = gcd(a, m); - if (b % g != 0) return -1; - return ((b / g) * multInv(a / g, m / g)) % (m / g); -} diff --git a/math/linearRecurence.cpp b/math/linearRecurence.cpp deleted file mode 100644 index 2501e64..0000000 --- a/math/linearRecurence.cpp +++ /dev/null @@ -1,33 +0,0 @@ -constexpr ll mod = 1'000'000'007; -vector modMul(const vector& a, const vector& b, - const vector& c) { - ll n = sz(c); - vector res(n * 2 + 1); - for (int i = 0; i <= n; i++) { //a*b - for (int j = 0; j <= n; j++) { - res[i + j] += a[i] * b[j]; - res[i + j] %= mod; - }} - for (int i = 2 * n; i > n; i--) { //res%c - for (int j = 0; j < n; j++) { - res[i - 1 - j] += res[i] * c[j]; - res[i - 1 - j] %= mod; - }} - res.resize(n + 1); - return res; -} - -ll kthTerm(const vector& f, const vector& c, ll k) { - assert(sz(f) == sz(c)); - vector tmp(sz(c) + 1), a(sz(c) + 1); - tmp[0] = a[1] = 1; //tmp = (x^k) % c - - for (k++; k > 0; k /= 2) { - if (k & 1) tmp = modMul(tmp, a, c); - a = modMul(a, a, c); - } - - ll res = 0; - for (int i = 0; i < sz(c); i++) res += (tmp[i+1] * f[i]) % mod; - return res % mod; -} diff --git a/math/linearSieve.cpp b/math/linearSieve.cpp deleted file mode 100644 index b029b9a..0000000 --- a/math/linearSieve.cpp +++ /dev/null @@ -1,49 +0,0 @@ -constexpr ll N = 10'000'000; -ll smallest[N], power[N], sieved[N]; -vector primes; - -//wird aufgerufen mit (p^k, p, k) für prime p -ll mu(ll pk, ll p, ll k) {return -(k == 1);} -ll phi(ll pk, ll p, ll k) {return pk - pk / p;} -ll div(ll pk, ll p, ll k) {return k+1;} -ll divSum(ll pk, ll p, ll k) {return (pk*p+1) / (p - 1);} -ll square(ll pk, ll p, ll k) {return k % 2 ? pk / p : pk;} -ll squareFree(ll pk, ll p, ll k) {return k % 2 ? pk : 1;} - -void sieve() { // O(N) - smallest[1] = power[1] = sieved[1] = 1; - for (ll i = 2; i < N; i++) { - if (smallest[i] == 0) { - primes.push_back(i); - for (ll pk = i, k = 1; pk < N; pk *= i, k++) { - smallest[pk] = i; - power[pk] = pk; - sieved[pk] = mu(pk, i, k); // Aufruf ändern! 
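        // Note: "Aufruf ändern" = to sieve a different multiplicative
        // function, replace mu(...) here with one of phi, div, divSum,
        // square or squareFree defined above; naive() below has the same
        // marked call.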
- }} - for (ll j = 0; i * primes[j] < N && primes[j] < smallest[i]; j++) { - ll k = i * primes[j]; - smallest[k] = power[k] = primes[j]; - sieved[k] = sieved[i] * sieved[primes[j]]; - } - if (i * smallest[i] < N && power[i] != i) { - ll k = i * smallest[i]; - smallest[k] = smallest[i]; - power[k] = power[i] * smallest[i]; - sieved[k] = sieved[power[k]] * sieved[k / power[k]]; -}}} - -ll naive(ll n) { // O(sqrt(n)) - ll res = 1; - for (ll p = 2; p * p <= n; p++) { - if (n % p == 0) { - ll pk = 1; - ll k = 0; - do { - n /= p; - pk *= p; - k++; - } while (n % p == 0); - res *= mu(pk, p, k); // Aufruf ändern! - }} - return res; -} diff --git a/math/longestIncreasingSubsequence.cpp b/math/longestIncreasingSubsequence.cpp deleted file mode 100644 index fcb63b4..0000000 --- a/math/longestIncreasingSubsequence.cpp +++ /dev/null @@ -1,17 +0,0 @@ -vector lis(vector& a) { - int n = sz(a), len = 0; - vector dp(n, INF), dp_id(n), prev(n); - for (int i = 0; i < n; i++) { - int pos = lower_bound(all(dp), a[i]) - dp.begin(); - dp[pos] = a[i]; - dp_id[pos] = i; - prev[i] = pos ? dp_id[pos - 1] : -1; - len = max(len, pos + 1); - } - // reconstruction - vector res(len); - for (int x = dp_id[len-1]; len--; x = prev[x]) { - res[len] = x; - } - return res; // indices of one LIS -} diff --git a/math/math.tex b/math/math.tex deleted file mode 100644 index c157e1b..0000000 --- a/math/math.tex +++ /dev/null @@ -1,535 +0,0 @@ -\section{Mathe} - -\begin{algorithm}{Zykel Erkennung} - \begin{methods} - \method{cycleDetection}{findet Zyklus von $x_0$ und Länge in $f$}{b+l} - \end{methods} - \sourcecode{math/cycleDetection.cpp} -\end{algorithm} - -\begin{algorithm}{Longest Increasing Subsequence} - \begin{itemize} - \item \code{lower\_bound} $\Rightarrow$ streng monoton - \item \code{upper\_bound} $\Rightarrow$ monoton - \end{itemize} - \sourcecode{math/longestIncreasingSubsequence.cpp} -\end{algorithm} - -\begin{algorithm}{Permutationen} - \begin{methods} - \method{kthperm}{findet $k$-te Permutation \big($k \in [0, n!$)\big)}{n\*\log(n)} - \end{methods} - \sourcecode{math/kthperm.cpp} - \begin{methods} - \method{permIndex}{bestimmt Index der Permutation \big($\mathit{res} \in [0, n!$)\big)}{n\*\log(n)} - \end{methods} - \sourcecode{math/permIndex.cpp} -\end{algorithm} -\clearpage - -\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$} -%\vspace{-1.25em} -%\begin{multicols}{2} -\method{mulMod}{berechnet $a \cdot b \bmod n$}{\log(b)} -\sourcecode{math/modMulIterativ.cpp} -% \vfill\null\columnbreak -\method{powMod}{berechnet $a^b \bmod n$}{\log(b)} -\sourcecode{math/modPowIterativ.cpp} -%\end{multicols} -%\vspace{-2.75em} -\begin{itemize} - \item für $a > 10^9$ \code{__int128} oder \code{modMul} benutzten! -\end{itemize} - -\begin{algorithm}{ggT, kgV, erweiterter euklidischer Algorithmus} - \runtime{\log(a) + \log(b)} - \sourcecode{math/extendedEuclid.cpp} -\end{algorithm} - -\subsection{Multiplikatives Inverses von $\boldsymbol{x}$ in $\boldsymbol{\mathbb{Z}/m\mathbb{Z}}$} -\textbf{Falls $\boldsymbol{m}$ prim:}\quad $x^{-1} \equiv x^{m-2} \bmod m$ - -\textbf{Falls $\boldsymbol{\ggT(x, m) = 1}$:} -\begin{itemize} - \item Erweiterter euklidischer Algorithmus liefert $\alpha$ und $\beta$ mit - $\alpha x + \beta m = 1$. - \item Nach Kongruenz gilt $\alpha x + \beta m \equiv \alpha x \equiv 1 \bmod m$. - \item $x^{-1} :\equiv \alpha \bmod m$ -\end{itemize} -\textbf{Sonst $\boldsymbol{\ggT(x, m) > 1}$:}\quad Es existiert kein $x^{-1}$. 
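% Worked example (illustrative only, not from the original notebook):
\textbf{Example:} for $x = 3$, $m = 7$ the extended Euclidean algorithm yields
$(-2)\cdot 3 + 1\cdot 7 = 1$, so $x^{-1} \equiv -2 \equiv 5 \bmod 7$
(check: $3\cdot 5 = 15 \equiv 1 \bmod 7$).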
-% \sourcecode{math/multInv.cpp} -\sourcecode{math/shortModInv.cpp} - -\paragraph{Lemma von \textsc{Bézout}} -Sei $(x, y)$ eine Lösung der diophantischen Gleichung $ax + by = d$. -Dann lassen sich wie folgt alle Lösungen berechnen: -\[ -\left(x + k\frac{b}{\ggT(a, b)},~y - k\frac{a}{\ggT(a, b)}\right) -\] - -\paragraph{\textsc{Pell}-Gleichungen} -Sei $(\overline{x}, \overline{y})$ die Lösung von $x^2 - ny^2 = 1$, die $x>1$ minimiert. -Sei $(\tilde{x}, \tilde{y})$ die Lösung von $x^2-ny^2 = c$, die $x>1$ minimiert. Dann lassen -sich alle Lösungen von $x^2-ny^2=c$ berechnen durch: -\begin{align*} - x_1&\coloneqq \tilde{x}, & y_1&\coloneqq\tilde{y}\\ - x_{k+1}&\coloneqq \overline{x}x_k+n\overline{y}y_k, & y_{k+1}&\coloneqq\overline{x}y_k+\overline{y}x_k -\end{align*} - -\begin{algorithm}{Lineare Kongruenz} - \begin{itemize} - \item Löst $ax\equiv b\pmod{m}$. - \item Weitere Lösungen unterscheiden sich um \raisebox{2pt}{$\frac{m}{g}$}, es gibt - also $g$ Lösungen modulo $m$. - \end{itemize} - \sourcecode{math/linearCongruence.cpp} -\end{algorithm} - -\begin{algorithm}{Chinesischer Restsatz} - \begin{itemize} - \item Extrem anfällig gegen Overflows. Evtl. häufig 128-Bit Integer verwenden. - \item Direkte Formel für zwei Kongruenzen $x \equiv a \bmod n$, $x \equiv b \bmod m$: - \[ - x \equiv a - y \cdot n \cdot \frac{a - b}{d} \bmod \frac{mn}{d} - \qquad \text{mit} \qquad - d := \ggT(n, m) = yn + zm - \] - Formel kann auch für nicht teilerfremde Moduli verwendet werden. - Sind die Moduli nicht teilerfremd, existiert genau dann eine Lösung, - wenn $a\equiv~b \bmod \ggT(m, n)$. - In diesem Fall sind keine Faktoren - auf der linken Seite erlaubt. - \end{itemize} - \sourcecode{math/chineseRemainder.cpp} -\end{algorithm} - -\begin{algorithm}{Primzahltest \& Faktorisierung} - \method{isPrime}{prüft ob Zahl prim ist}{\log(n)^2} - \sourcecode{math/millerRabin.cpp} - \method{rho}{findet zufälligen Teiler}{\sqrt[\leftroot{3}\uproot{2}4]{n}} - \sourcecode{math/rho.cpp} - %\method{squfof}{findet zufälligen Teiler}{\sqrt[\leftroot{4}\uproot{2}4]{n}} - %\sourcecode{math/squfof.cpp} -\end{algorithm} - -\begin{algorithm}{Teiler} - \begin{methods} - \method{countDivisors}{Zählt Teiler von $n$}{\sqrt[\leftroot{3}\uproot{2}3]{n}} - \end{methods} - \sourcecode{math/divisors.cpp} -\end{algorithm} - -\begin{algorithm}{Numerisch Extremstelle bestimmen} - \sourcecode{math/goldenSectionSearch.cpp} -\end{algorithm} - -\begin{algorithm}{Numerisch Integrieren, Simpsonregel} - \sourcecode{math/simpson.cpp} -\end{algorithm} - -\begin{algorithm}{Diskreter Logarithmus} - \begin{methods} - \method{solve}{bestimmt Lösung $x$ für $a^x=b \bmod m$}{\sqrt{m}\*\log(m)} - \end{methods} - \sourcecode{math/discreteLogarithm.cpp} -\end{algorithm} -%TODO -\begin{algorithm}{Diskrete \textrm{\textit{n}}-te Wurzel} - \begin{methods} - \method{root}{bestimmt Lösung $x$ für $x^a=b \bmod m$ }{\sqrt{m}\*\log(m)} - \end{methods} - Alle Lösungen haben die Form $g^{c + \frac{i \cdot \phi(n)}{\gcd(a, \phi(n))}}$ - \sourcecode{math/discreteNthRoot.cpp} -\end{algorithm} - - -\begin{algorithm}{Primitivwurzeln} - \begin{itemize} - \item Primitivwurzel modulo $n$ existiert $\Leftrightarrow$ $n \in \{2,\ 4,\ p^\alpha,\ 2\cdot p^\alpha \mid\ 2 < p \in \mathbb{P},\ \alpha \in \mathbb{N}\}$ - \item es existiert entweder keine oder $\varphi(\varphi(n))$ inkongruente Primitivwurzeln - \item Sei $g$ Primitivwurzel modulo $n$. - Dann gilt:\newline - Das kleinste $k$, sodass $g^k \equiv 1 \bmod n$, ist $k = \varphi(n)$. 
- \end{itemize} - \begin{methods} - \method{isPrimitive}{prüft ob $g$ eine Primitivwurzel ist}{\log(\varphi(n))\*\log(n)} - \method{findPrimitive}{findet Primitivwurzel (oder -1)}{\abs{ans}\*\log(\varphi(n))\*\log(n)} - \end{methods} - \sourcecode{math/primitiveRoot.cpp} -\end{algorithm} - -\begin{algorithm}{Lineares Sieb und Multiplikative Funktionen} - Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ, wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$. - - $\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen. - - \begin{methods} - \method{sieve}{berechnet Primzahlen und co.}{N} - \method{sieved}{Wert der entsprechenden multiplikativen Funktion}{1} - - \method{naive}{Wert der entsprechenden multiplikativen Funktion}{\sqrt{n}} - \end{methods} - \textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}! - - \sourcecode{math/linearSieve.cpp} - \textbf{\textsc{Möbius}-Funktion:} - \begin{itemize} - \item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat - \item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat - \item $\mu(n)=0$, falls $n$ nicht quadratfrei ist - \end{itemize} - - \textbf{\textsc{Euler}sche $\boldsymbol{\varphi}$-Funktion:} - \begin{itemize} - \item Zählt die zu $n$ teilerfremden Zahlen $\leq n$. - \item $p$ prim, $k \in \mathbb{N}$: - $~\varphi(p^k) = p^k - p^{k - 1}$ - - \item \textbf{Euler's Theorem:} - Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$. - Falls $m$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}: - $a^{m} \equiv a \pmod{m}$ - \end{itemize} -\end{algorithm} - -\begin{algorithm}{Primzahlsieb von \textsc{Eratosthenes}} - \begin{itemize} - \item Bis $10^8$ in unter 64MB Speicher (lange Berechnung) - \end{itemize} - \begin{methods} - \method{primeSieve}{berechnet Primzahlen und Anzahl}{N\*\log(\log(N))} - \method{isPrime}{prüft ob Zahl prim ist}{1} - \end{methods} - \sourcecode{math/primeSieve.cpp} -\end{algorithm} - -\begin{algorithm}{\textsc{Möbius}-Inversion} - \begin{itemize} - \item Seien $f,g : \mathbb{N} \to \mathbb{N}$ und $g(n) := \sum_{d \vert n}f(d)$. - Dann ist $f(n) = \sum_{d \vert n}g(d)\mu(\frac{n}{d})$. - \item $\sum\limits_{d \vert n}\mu(d) = - \begin{cases*} - 1 & falls $n = 1$\\ - 0 & sonst - \end{cases*}$ - \end{itemize} - \textbf{Beispiel Inklusion/Exklusion:} - Gegeben sei eine Sequenz $A=\{a_1,\ldots,a_n\}$ von Zahlen, $1 \leq a_i \leq N$. Zähle die Anzahl der \emph{coprime subsequences}.\newline - \textbf{Lösung}: - Für jedes $x$ sei $cnt[x]$ die Anzahl der Vielfachen von $x$ in $A$. - Es gibt $2^{cnt[x]}-1$ nicht leere Subsequences in $A$, die nur Vielfache von $x$ enthalten. - Die Anzahl der Subsequences mit $\ggT=1$ ist gegeben durch $\sum_{i = 1}^N \mu(i) \cdot (2^{cnt[i]} - 1)$. - %\sourcecode{math/mobius.cpp} -\end{algorithm} - -\optional{ -\columnbreak -\subsection{\textsc{Euler}sche $\boldsymbol{\varphi}$-Funktion} -\begin{itemize} - \item Zählt die zu $n$ teilerfremden Zahlen $\leq n$. - - \item Multiplikativ: - $\gcd(a,b) = 1 \Longrightarrow \varphi(a) \cdot \varphi(b) = \varphi(ab)$ - - \item $p$ prim, $k \in \mathbb{N}$: - $~\varphi(p^k) = p^k - p^{k - 1}$ - - \item \textbf{\textsc{Euler}'s Theorem:} - Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$.
Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$. - Falls $m$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}: - $a^{m} \equiv a \pmod{m}$ -\end{itemize} -\sourcecode{math/phi.cpp} -} - -\begin{algorithm}{Polynome, FFT, NTT \& andere Transformationen} - Multipliziert Polynome $A$ und $B$. - \begin{itemize} - \item $\deg(A \cdot B) = \deg(A) + \deg(B)$ - \item Vektoren \code{a} und \code{b} müssen mindestens Größe - $\deg(A \cdot B) + 1$ haben. - Größe muss eine Zweierpotenz sein. - \item Für ganzzahlige Koeffizienten: \code{(ll)round(real(a[i]))} - \item \emph{xor}, \emph{or} und \emph{and} Transform funktioniert auch mit \code{double} oder modulo einer Primzahl $p$ falls $p \geq 2^{\texttt{bits}}$ - \end{itemize} - %\lstinputlisting{math/fft.cpp} - %\lstinputlisting{math/ntt.cpp} - %\textcolor{safeOrange}{$\blacksquare$} NTT code, %\textcolor{safeGreen}{$\blacksquare$} FFT code - \sourcecode{math/transforms/fft.cpp} - \sourcecode{math/transforms/ntt.cpp} - \vfill\null - \columnbreak - \sourcecode{math/transforms/bitwiseTransforms.cpp} - Multiplikation mit 2 transforms statt 3: (nur benutzten wenn nötig!) - \sourcecode{math/transforms/fftMul.cpp} -\end{algorithm} - -\begin{algorithm}{Operations on Formal Power Series} - \sourcecode{math/transforms/seriesOperations.cpp} -\end{algorithm} - -\subsection{LGS über $\boldsymbol{\mathbb{F}_p}$} -\method{gauss}{löst LGS}{n^3} -\sourcecode{math/lgsFp.cpp} - -\subsection{LGS über $\boldsymbol{\mathbb{R}}$} -\method{gauss}{löst LGS}{n^3} -\sourcecode{math/gauss.cpp} - -\begin{algorithm}{\textsc{Legendre}-Symbol} - Sei $p \geq 3$ eine Primzahl, $a \in \mathbb{Z}$: - \begin{align*} - \legendre{a}{p} &= - \begin{cases*} - \hphantom{-}0 & falls $p~\vert~a$ \\[-1ex] - \hphantom{-}1 & falls $\exists x \in \mathbb{Z}\backslash p\mathbb{Z} : a \equiv x^2 \bmod p$ \\[-1ex] - -1 & sonst - \end{cases*} \\ - \legendre{-1}{p} = (-1)^{\frac{p - 1}{2}} &= - \begin{cases*} - \hphantom{-}1 & falls $p \equiv 1 \bmod 4$ \\[-1ex] - -1 & falls $p \equiv 3 \bmod 4$ - \end{cases*} \\ - \legendre{2}{p} = (-1)^{\frac{p^2 - 1}{8}} &= - \begin{cases*} - \hphantom{-}1 & falls $p \equiv \pm 1 \bmod 8$ \\[-1ex] - -1 & falls $p \equiv \pm 3 \bmod 8$ - \end{cases*} - \end{align*} - \begin{align*} - \legendre{p}{q} \cdot \legendre{q}{p} = (-1)^{\frac{p - 1}{2} \cdot \frac{q - 1}{2}} && - \legendre{a}{p} \equiv a^{\frac{p-1}{2}}\bmod p - \end{align*} - \sourcecode{math/legendre.cpp} -\end{algorithm} - -\optional{ -\subsection{Primzahlzählfunktion $\boldsymbol{\pi}$} -\begin{methods} - \method{init}{berechnet $\pi$ bis $N$}{N\*\log(\log(N))} - \method{phi}{zählt zu $p_i$ teilerfremde Zahlen $\leq n$ für alle $i \leq k$}{???} - \method{pi}{zählt Primzahlen $\leq n$ ($n < N^2$)}{n^{2/3}} -\end{methods} -\sourcecode{math/piLehmer.cpp} -} - -\begin{algorithm}{Lineare Rekurrenz} - \begin{methods} - \method{BerlekampMassey}{Berechnet eine lineare Rekurrenz $n$-ter Ordnung}{n^2} - \method{}{aus den ersten $2n$ Werte}{} - \end{methods} - \sourcecode{math/berlekampMassey.cpp} - Sei $f(n)=c_{n-1}f(n-1)+c_{n-2}f(n-2)+\dots + c_0f(0)$ eine lineare Rekurrenz. 
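\emph{Beispiel (zur Einordnung):} Die \textsc{Fibonacci}-Folge $0,1,1,2,3,\ldots$ erfüllt $f(n)=f(n-1)+f(n-2)$, also Ordnung $2$ mit $c_1=c_0=1$; aus den ersten $2\cdot 2=4$ Werten lässt sich diese Rekurrenz mit \code{BerlekampMassey} rekonstruieren.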
- - \begin{methods} - \method{kthTerm}{Berechnet $k$-ten Term einer Rekurrenz $n$-ter Ordnung}{\log(k)\cdot n^2} - \end{methods} - \sourcecode{math/linearRecurence.cpp} - Alternativ kann der \mbox{$k$-te} Term in \runtime{n^3\log(k)} berechnet werden: - $$\renewcommand\arraystretch{1.5} - \setlength\arraycolsep{3pt} - \begin{pmatrix} - c_{n-1} & c_{n-2} & \smash{\cdots} & \smash{\cdots} & c_0 \\ - 1 & 0 & \smash{\cdots} & \smash{\cdots} & 0 \\ - 0 & \smash{\ddots} & \smash{\ddots} & & \smash{\vdots} \\ - \smash{\vdots} & \smash{\ddots} & \smash{\ddots} & \smash{\ddots} & \smash{\vdots} \\ - 0 & \smash{\cdots} & 0 & 1 & 0 \\ - \end{pmatrix}^k - \times~~ - \begin{pmatrix} - f(n-1) \\ - f(n-2) \\ - \smash{\vdots} \\ - \smash{\vdots} \\ - f(0) \\ - \end{pmatrix} - ~~=~~ - \begin{pmatrix} - f(n-1+k) \\ - f(n-2+k) \\ - \smash{\vdots} \\ - \smash{\vdots} \\ - f(k) \makebox[0pt][l]{\hspace{15pt}$\vcenter{\hbox{\huge$\leftarrow$}}$}\\ - \end{pmatrix} - $$ -\end{algorithm} - -\begin{algorithm}{Matrix-Exponent} - \begin{methods} - \method{precalc}{berechnet $m^{2^b}$ vor}{\log(b)\*n^3} - \method{calc}{berechnet $m^b_{y,x}$}{\log(b)\cdot n^2} - \end{methods} - \sourcecode{math/matrixPower.cpp} -\end{algorithm} - -\begin{algorithm}{Inversionszahl} - \sourcecode{math/inversions.cpp} -\end{algorithm} - -\subsection{Satz von \textsc{Sprague-Grundy}} -Weise jedem Zustand $X$ wie folgt eine \textsc{Grundy}-Zahl $g\left(X\right)$ zu: -\[ -g\left(X\right) := \min\left\{ -\mathbb{Z}_0^+ \setminus -\left\{g\left(Y\right) \mid Y \text{ von } X \text{ aus direkt erreichbar}\right\} -\right\} -\] -$X$ ist genau dann gewonnen, wenn $g\left(X\right) > 0$ ist.\\ -Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \textsc{Grundy}-Zahl des Gesamtzustandes $g\left(X_1\right) \oplus \ldots \oplus g\left(X_k\right)$. - -\subsection{Kombinatorik} - -\paragraph{Wilsons Theorem} -A number $n$ is prime if and only if -$(n-1)!\equiv -1\bmod{n}$.\\ -($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$) -\begin{align*} - (n-1)!\equiv\begin{cases} - -1\bmod{n},&\mathrm{falls}~n \in \mathbb{P}\\ - \hphantom{-}2\bmod{n},&\mathrm{falls}~n = 4\\ - \hphantom{-}0\bmod{n},&\mathrm{sonst} - \end{cases} -\end{align*} - -\paragraph{\textsc{Zeckendorfs} Theorem} -Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer -verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei -aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\ -\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch -hineinpasst. - -\paragraph{\textsc{Lucas}-Theorem} -Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung), -so gilt -\vspace{-0.75\baselineskip} -\[ - \binom{m}{n} \equiv \prod_{i=0}^k\binom{m_i}{n_i} \bmod{p}. -\] - -%\begin{algorithm}{Binomialkoeffizienten} -\paragraph{Binomialkoeffizienten} - Die Anzahl der \mbox{$k$-elementigen} Teilmengen einer \mbox{$n$-elementigen} Menge. 
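Als Gedankenstütze: mit vorberechneten $n!$ und $n!^{-1}$ (siehe \code{precalc} unten) ergibt sich $\binom{n}{k} \equiv n! \cdot (k!)^{-1} \cdot ((n-k)!)^{-1} \bmod p$ für $n < p$, also jede Anfrage in $\runtime{1}$.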
- - \begin{methods} - \method{precalc}{berechnet $n!$ und $n!^{-1}$ vor}{\mathit{lim}} - \method{calc\_binom}{berechnet Binomialkoeffizient}{1} - \end{methods} - \sourcecode{math/binomial0.cpp} - Falls $n \geq p$ für $\mathit{mod}=p^k$: berechne \textit{fac} und \textit{inv}, aber teile $p$ aus $i$ heraus und berechne die Häufigkeit von $p$ in $n!$ als $\sum\limits_{i\geq1}\big\lfloor\frac{n}{p^i}\big\rfloor$ - - \begin{methods} - \method{calc\_binom}{berechnet Binomialkoeffizient $(n \le 61)$}{k} - \end{methods} - \sourcecode{math/binomial1.cpp} - - \begin{methods} - \method{calc\_binom}{berechnet Binomialkoeffizient modulo Primzahl $p$}{p-n} - \end{methods} - \sourcecode{math/binomial3.cpp} - -% \begin{methods} -% \method{calc\_binom}{berechnet Primfaktoren vom Binomialkoeffizient}{n} -% \end{methods} -% \textbf{WICHTIG:} braucht alle Primzahlen $\leq n$ -% \sourcecode{math/binomial2.cpp} -%\end{algorithm} - -\paragraph{\textsc{Catalan}-Zahlen} -\begin{itemize} - \item Die \textsc{Catalan}-Zahl $C_n$ gibt an: - \begin{itemize} - \item Anzahl der Binärbäume mit $n$ nicht unterscheidbaren Knoten. - \item Anzahl der validen Klammerausdrücke mit $n$ Klammerpaaren. - \item Anzahl der korrekten Klammerungen von $n+1$ Faktoren. - \item Anzahl der Möglichkeiten, ein konvexes Polygon mit $n + 2$ Ecken zu triangulieren. - \item Anzahl der monotonen Pfade (zwischen gegenüberliegenden Ecken) in - einem $n \times n$-Gitter, die nicht die Diagonale kreuzen. - \end{itemize} -\end{itemize} -\[C_0 = 1\qquad C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} = -\frac{1}{n + 1}\binom{2n}{n} = \frac{4n - 2}{n+1} \cdot C_{n-1}\] -\begin{itemize} - \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2} - \item Formel $2$ und $3$ erlauben Berechnung in \runtime{n} -\end{itemize} - -\paragraph{\textsc{Catalan}-Convolution} -\begin{itemize} - \item Anzahl an Klammerausdrücken mit $n+k$ Klammerpaaren, die mit $(^k$ beginnen. -\end{itemize} -\[C^k_0 = 1\qquad C^k_n = \sum\limits_{\mathclap{a_0+a_1+\dots+a_k=n}} C_{a_0}C_{a_1}\cdots C_{a_k} = -\frac{k+1}{n+k+1}\binom{2n+k}{n} = \frac{(2n+k-1)\cdot(2n+k)}{n(n+k+1)} \cdot C^k_{n-1}\] - -\paragraph{\textsc{Euler}-Zahlen 1. Ordnung} -Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Anstiegen. -Für die $n$-te Zahl gibt es $n$ mögliche Positionen zum Einfügen. -Dabei wird entweder ein Anstieg in zwei gesplittet oder ein Anstieg um $n$ ergänzt. -\[\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \quad -\eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1}= -\sum_{i=0}^{k} (-1)^i\binom{n+1}{i}(k+1-i)^n\] -\begin{itemize} - \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2} - \item Formel $2$ erlaubt Berechnung in \runtime{n\log(n)} -\end{itemize} - -\paragraph{\textsc{Euler}-Zahlen 2. Ordnung} -Die Anzahl der Permutationen von $\{1,1, \ldots, n,n\}$ mit genau $k$ Anstiegen. -\[\eulerII{n}{0} = 1 \qquad\eulerII{n}{n} = 0 \qquad\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}\] -\begin{itemize} - \item Formel erlaubt Berechnung ohne Division in \runtime{n^2} -\end{itemize} - -\paragraph{\textsc{Stirling}-Zahlen 1. Ordnung} -Die Anzahl der Permutationen von $\{1, \ldots, n\}$ mit genau $k$ Zyklen. -Es gibt zwei Möglichkeiten für die $n$-te Zahl. Entweder sie bildet einen eigenen Zyklus, oder sie kann an jeder Position in jedem Zyklus einsortiert werden.
-\[\stirlingI{0}{0} = 1 \qquad -\stirlingI{n}{0} = \stirlingI{0}{n} = 0 \qquad -\stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}\] -\begin{itemize} - \item Formel erlaubt Berechnung ohne Division in \runtime{n^2} -\end{itemize} -\[\sum_{k=0}^{n}(-1)^{n-k}\stirlingI{n}{k}x^k=x(x-1)(x-2)\cdots(x-n+1)\] -\begin{itemize} - \item Berechne Polynom mit FFT und benutze den Betrag der Koeffizienten \runtime{n\log(n)^2} (nur ungefähr gleich große Polynome zusammen multiplizieren, beginnend mit $x-k$) -\end{itemize} - -\paragraph{\textsc{Stirling}-Zahlen 2. Ordnung} -Die Anzahl der Möglichkeiten, $n$ Elemente in $k$ nichtleere Teilmengen zu zerlegen. -Es gibt $k$ Möglichkeiten, die $n$ in eine $n-1$-Partition einzuordnen. -Dazu kommt der Fall, dass die $n$ in ihrer eigenen Teilmenge (alleine) steht. -\[\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad -\stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} = -\frac{1}{k!} \sum\limits_{i=0}^{k} (-1)^{k-i}\binom{k}{i}i^n\] -\begin{itemize} - \item Formel $1$ erlaubt Berechnung ohne Division in \runtime{n^2} - \item Formel $2$ erlaubt Berechnung in \runtime{n\log(n)} -\end{itemize} - -\paragraph{\textsc{Bell}-Zahlen} -Anzahl der Partitionen von $\{1, \ldots, n\}$. -Wie \textsc{Stirling}-Zahlen 2. Ordnung ohne Limit durch $k$. -\[B_0 = B_1 = 1 \qquad -B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k} -= \sum\limits_{k = 0}^{n}\stirlingII{n}{k}\qquad\qquad B_{p^m+n}\equiv m\cdot B_n + B_{n+1} \bmod{p}\] - -\paragraph{Partitions} -Die Anzahl der Partitionen von $n$ in genau $k$ positive Summanden. -Die Anzahl der Partitionen von $n$ mit Elementen aus $\{1,\dots,k\}$. -\begin{align*} - p_0(0)=1 \qquad p_k(n)&=0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0\\ - p_k(n)&= p_k(n-k) + p_{k-1}(n-1)\\[2pt] - p(n)&=\sum_{k=1}^{n} p_k(n)=p_n(2n)=\sum\limits_{k\neq0}^\infty(-1)^{k+1}p\bigg(n - \frac{k(3k-1)}{2}\bigg) -\end{align*} -\begin{itemize} - \item in Formel $3$ kann abgebrochen werden, wenn $\frac{k(3k-1)}{2} > n$. - \item Die Anzahl der Partitionen von $n$ in bis zu $k$ positive Summanden ist $\sum\limits_{i=0}^{k}p_i(n)=p_k(n+k)$.
-\end{itemize} - -\subsection{The Twelvefold Way \textnormal{(verteile $n$ Bälle auf $k$ Boxen)}} -\input{math/tables/twelvefold} - -%\input{math/tables/numbers} - -\begin{algorithm}[optional]{Big Integers} - \sourcecode{math/bigint.cpp} -\end{algorithm} diff --git a/math/matrixPower.cpp b/math/matrixPower.cpp deleted file mode 100644 index 05e29f6..0000000 --- a/math/matrixPower.cpp +++ /dev/null @@ -1,16 +0,0 @@ -vector pows; - -void precalc(mat m) { - pows = {mat(1), m}; - for (int i = 1; i < 60; i++) pows.push_back(pows[i] * pows[i]); -} - -ll calc(int x, int y, ll b) { - vector v(pows[0].m.size()); - v[x] = 1; - for (ll i = 1; b > 0; i++) { - if (b & 1) v = pows[i] * v; - b /= 2; - } - return v[y]; -} diff --git a/math/millerRabin.cpp b/math/millerRabin.cpp deleted file mode 100644 index cb27d29..0000000 --- a/math/millerRabin.cpp +++ /dev/null @@ -1,19 +0,0 @@ -constexpr ll bases32[] = {2, 7, 61}; -constexpr ll bases64[] = {2, 325, 9375, 28178, 450775, - 9780504, 1795265022}; -bool isPrime(ll n) { - if (n < 2 || n % 2 == 0) return n == 2; - ll d = n - 1, j = 0; - while (d % 2 == 0) d /= 2, j++; - for (ll a : bases64) { - if (a % n == 0) continue; - ll v = powMod(a, d, n); //with mulmod or int128 - if (v == 1 || v == n - 1) continue; - for (ll i = 1; i <= j; i++) { - v = ((lll)v * v) % n; - if (v == n - 1 || v <= 1) break; - } - if (v != n - 1) return false; - } - return true; -} diff --git a/math/mobius.cpp b/math/mobius.cpp deleted file mode 100644 index 3fb4d9e..0000000 --- a/math/mobius.cpp +++ /dev/null @@ -1,21 +0,0 @@ -ll mu(ll n) { // Laufzeit: O(sqrt(n)); - ll res = 1; - for (ll i = 2; i * i <= n; i++) { - if (n % i == 0) { // Optimierung: Nur Primzahlen - if (n % (i * i) == 0) return 0; - res *= -1; - n /= i; - }} - return n > 1 ? -res : res; -} - -// berechnet Möbiusfunktion. Laufzeit: O(N*log(log(N))) -vector mu(n + 1, 1); -for (ll i = 2; i <= n; i++) { - if (mu[i] == 1) { - for (ll j = i; j <= n; j += i) mu[j] *= -2; - for (ll j = i*i; j <= n; j += i*i) mu[j] = 0; - } - // log2(abs(mu[i])) = number of primes - mu[i] = (mu[i] > 0) - (mu[i] < 0); -} diff --git a/math/modExp.cpp b/math/modExp.cpp deleted file mode 100644 index 2329a94..0000000 --- a/math/modExp.cpp +++ /dev/null @@ -1,6 +0,0 @@ -ll powMod(ll a, ll b, ll n) { - if(b == 0) return 1; - if(b == 1) return a % n; - if(b & 1) return (powMod(a, b - 1, n) * a) % n; - else return powMod((a * a) % n, b / 2, n); -} diff --git a/math/modMulIterativ.cpp b/math/modMulIterativ.cpp deleted file mode 100644 index 611f09a..0000000 --- a/math/modMulIterativ.cpp +++ /dev/null @@ -1,9 +0,0 @@ -ll mulMod(ll a, ll b, ll n) { - ll res = 0; - while (b > 0) { - if (b & 1) res = (a + res) % n; - a = (a * 2) % n; - b /= 2; - } - return res; -} diff --git a/math/modPowIterativ.cpp b/math/modPowIterativ.cpp deleted file mode 100644 index 0dc3fb1..0000000 --- a/math/modPowIterativ.cpp +++ /dev/null @@ -1,9 +0,0 @@ -ll powMod(ll a, ll b, ll n) { - ll res = 1; - while (b > 0) { - if (b & 1) res = (a * res) % n; - a = (a * a) % n; - b /= 2; - } - return res; -} diff --git a/math/modSqrt.cpp b/math/modSqrt.cpp deleted file mode 100644 index 367c6c7..0000000 --- a/math/modSqrt.cpp +++ /dev/null @@ -1,23 +0,0 @@ -ll sqrtMod(ll a, ll p) { - assert(powMod(a, (p + 1)/2, p) == 1); //a ist ein quadrat mod p? 
- if (p % 4 == 3) return powMod(a, (p + 1)/2, p); - if (p % 8 == 5) return powMod(a, (p + 3)/8, p); - ll s = p - 1; - ll r = 0; - while (s % 2 == 0) s /= 2, r++; - ll n = 2; - while (powMod(n, (p - 1)/2, p) != p - 1) n++; - ll x = powMod(a, (s + 1)/2, p); - ll b = powMod(a, s, p); - ll g = powMod(n, s, p); - while (true) { - ll t = b; - ll m = 0; - for (;m < r && t != 1; m++) t = (t * t) % p; - if (t == 1) return x; - ll gs = powMod(g, 1ll << (r - m - 1), p); - g = (gs * gs) % p; - x = (x * gs) % p; - b = (b * g) % p; - r = m; -}} diff --git a/math/multInv.cpp b/math/multInv.cpp deleted file mode 100644 index 647dc2d..0000000 --- a/math/multInv.cpp +++ /dev/null @@ -1,4 +0,0 @@ -ll multInv(ll x, ll m) { - auto [d, a, b] = extendedEuclid(x, m); // Implementierung von oben. - return ((a % m) + m) % m; -} diff --git a/math/permIndex.cpp b/math/permIndex.cpp deleted file mode 100644 index 4cffc12..0000000 --- a/math/permIndex.cpp +++ /dev/null @@ -1,13 +0,0 @@ -ll permIndex(vector v) { - Tree t; - reverse(all(v)); - for (ll& x : v) { - t.insert(x); - x = t.order_of_key(x); - } - ll res = 0; - for (int i = sz(v); i > 0; i--) { - res = res * i + v[i - 1]; - } - return res; -} diff --git a/math/phi.cpp b/math/phi.cpp deleted file mode 100644 index 482a139..0000000 --- a/math/phi.cpp +++ /dev/null @@ -1,21 +0,0 @@ -ll phi(ll n) { // Laufzeit: O(sqrt(n)) - // Optimierung: Falls n prim, n - 1 zurückgeben - ll result = n; - for(ll i = 2; i * i <= n; ++i) { - if(n % i == 0) { // Optimierung: Nur Primzahlen - while(n % i == 0) n /= i; - result -= result / i; - }} - if(n > 1) result -= result / n; - return result; -} - -// Sieb, falls alle Werte benötigt werden. -// Laufzeit: O(N*log(log(N))) -vector phi(n + 1); -for (int i = 1; i <= n; i++) phi[i] = i; -for (int i = 2; i <= n; i++) if (phi[i] == i) { - for (int j = i; j <= n; j += i) { - phi[j] /= i; - phi[j] *= i - 1; -}} diff --git a/math/piLegendre.cpp b/math/piLegendre.cpp deleted file mode 100644 index 21b974b..0000000 --- a/math/piLegendre.cpp +++ /dev/null @@ -1,23 +0,0 @@ -constexpr ll cache = 500; // requires O(cache^3) -vector> memo(cache * cache, vector(cache)); - -ll pi(ll n); - -ll phi(ll n, ll k) { - if (n <= 1 || k < 0) return 0; - if (n <= primes[k]) return n - 1; - if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k; - bool ok = n < cache * cache; - if (ok && memo[n][k] > 0) return memo[n][k]; - ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1); - if (ok) memo[n][k] = res; - return res; -} - -ll pi(ll n) { - if (n < N) { // implement this as O(1) lookup for speedup! 
- return distance(primes.begin(), upper_bound(all(primes), n)); - } else { - ll k = pi(sqrtl(n) + 1); - return n - phi(n, k) + k; -}} diff --git a/math/piLehmer.cpp b/math/piLehmer.cpp deleted file mode 100644 index 56c172d..0000000 --- a/math/piLehmer.cpp +++ /dev/null @@ -1,52 +0,0 @@ -constexpr ll cacheA = 2 * 3 * 5 * 7 * 11 * 13 * 17; -constexpr ll cacheB = 7; -ll memoA[cacheA + 1][cacheB + 1]; -ll memoB[cacheB + 1]; -ll memoC[N]; - -void init() { - primeSieve(); // code from above - for (ll i = 0; i < N; i++) { - memoC[i] = memoC[i - 1]; - if (isPrime(i)) memoC[i]++; - } - memoB[0] = 1; - for(ll i = 0; i <= cacheA; i++) memoA[i][0] = i; - for(ll i = 1; i <= cacheB; i++) { - memoB[i] = primes[i - 1] * memoB[i - 1]; - for(ll j = 1; j <= cacheA; j++) { - memoA[j][i] = memoA[j][i - 1] - memoA[j / - primes[i - 1]][i - 1]; -}}} - -ll phi(ll n, ll k) { - if(k == 0) return n; - if(k <= cacheB) - return memoA[n % memoB[k]][k] + - (n / memoB[k]) * memoA[memoB[k]][k]; - if(n <= primes[k - 1]*primes[k - 1]) return memoC[n] - k + 1; - if(n <= primes[k - 1]*primes[k - 1]*primes[k - 1] && n < N) { - ll b = memoC[(ll)sqrtl(n)]; - ll res = memoC[n] - (b + k - 2) * (b - k + 1) / 2; - for(ll i = k; i < b; i++) res += memoC[n / primes[i]]; - return res; - } - return phi(n, k - 1) - phi(n / primes[k - 1], k - 1); -} - -ll pi(ll n) { - if (n < N) return memoC[n]; - ll a = pi(sqrtl(sqrtl(n))); - ll b = pi(sqrtl(n)); - ll c = pi(cbrtl(n)); - ll res = phi(n, a) + (b + a - 2) * (b - a + 1) / 2; - for (ll i = a; i < b; i++) { - ll w = n / primes[i]; - res -= pi(w); - if (i > c) continue; - ll bi = pi(sqrtl(w)); - for (ll j = i; j < bi; j++) { - res -= pi(w / primes[j]) - j; - }} - return res; -} diff --git a/math/polynomial.cpp b/math/polynomial.cpp deleted file mode 100644 index 44f6207..0000000 --- a/math/polynomial.cpp +++ /dev/null @@ -1,65 +0,0 @@ -struct poly { - vector data; - - poly(int deg = 0) : data(max(1, deg)) {} - poly(initializer_list _data) : data(_data) {} - - int size() const {return sz(data);} - - void trim() { - for (ll& x : data) x = (x % mod + mod) % mod; - while (size() > 1 && data.back() == 0) data.pop_back(); - } - - ll& operator[](int x) {return data[x];} - const ll& operator[](int x) const {return data[x];} - - ll operator()(int x) const { - ll res = 0; - for (int i = size() - 1; i >= 0; i--) - res = (res * x + data[i]) % mod; - return res % mod; - } - - poly& operator+=(const poly& o) { - if (size() < o.size()) data.resize(o.size()); - for (int i = 0; i < o.size(); i++) - data[i] = (data[i] + o[i]) % mod; - return *this; - } - - poly operator*(const poly& o) const { - poly res(size() + o.size() - 1); - for (int i = 0; i < size(); i++) { - for (int j = 0; j < o.size(); j++) { - res[i + j] += (data[i] * o[j]) % mod; - }} - res.trim(); - return res; - } - - //return p(x+a) - poly operator<<(ll a) const { - poly res(size()); - for (int i = size() - 1; i >= 0; i--) { - for (int j = size() - i - 1; j >= 1; j--) - res[j] = (res[j] * a + res[j - 1]) % mod; - res[0] = (res[0] * a + res[i]) % mod; - } - return res; - } - - pair divmod(const poly& d) const { - int i = size() - d.size(); - poly s(i + 1), r = *this; - ll inv = multInv(d.data.back(), mod); - for (; i >= 0; i--) { - s[i] = (r.data.back() * inv) % mod; - r.data.pop_back(); - for (int j = 0; i + j < r.size(); j++) { - r[i + j] = (r.data[i + j] - s[i] * d[j]) % mod; - }} - s.trim(); r.trim(); - return {s, r}; - } -}; diff --git a/math/primeSieve.cpp b/math/primeSieve.cpp deleted file mode 100644 index 1b0f514..0000000 --- 
a/math/primeSieve.cpp +++ /dev/null @@ -1,16 +0,0 @@ -constexpr ll N = 100'000'000; -bitset<N / 2> isNotPrime; -vector<ll> primes = {2}; - -bool isPrime(ll x) { - if (x < 2 || x % 2 == 0) return x == 2; - else return !isNotPrime[x / 2]; -} - -void primeSieve() { - for (ll i = 3; i < N; i += 2) {// i * i < N reicht für isPrime - if (!isNotPrime[i / 2]) { - primes.push_back(i); // optional - for (ll j = i * i; j < N; j+= 2 * i) { - isNotPrime[j / 2] = 1; -}}}} diff --git a/math/primitiveRoot.cpp b/math/primitiveRoot.cpp deleted file mode 100644 index 464bdb3..0000000 --- a/math/primitiveRoot.cpp +++ /dev/null @@ -1,23 +0,0 @@ -bool isPrimitive(ll g, ll n, ll phi, map<ll, ll> phiFacs) { - if (g == 1) return n == 2; - for (auto [f, _] : phiFacs) - if (powMod(g, phi / f, n) == 1) return false; - return true; -} - -bool isPrimitive(ll g, ll n) { - ll phin = phi(n); //isPrime(n) => phi(n) = n - 1 - map<ll, ll> phiFacs; - factor(phin, phiFacs); - return isPrimitive(g, n, phin, phiFacs); -} - -ll findPrimitive(ll n) { - ll phin = phi(n); //isPrime(n) => phi(n) = n - 1 - map<ll, ll> phiFacs; - factor(phin, phiFacs); - //auch zufällige Reihenfolge möglich! - for (ll res = 1; res < n; res++) - if (isPrimitive(res, n, phin, phiFacs)) return res; - return -1; -} diff --git a/math/rho.cpp b/math/rho.cpp deleted file mode 100644 index 7885196..0000000 --- a/math/rho.cpp +++ /dev/null @@ -1,19 +0,0 @@ -using lll = __int128; -ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim. - if (n % 2 == 0) return 2; - ll x = 0, y = 0, prd = 2, i = n/2 + 7; - auto f = [&](lll x){return (x * x + i) % n;}; - for (ll t = 30, i = n/2 + 7; t % 40 || gcd(prd, n) == 1; t++) { - if (x == y) x = ++i, y = f(x); - if (ll q = (lll)prd * abs(x-y) % n; q) prd = q; - x = f(x); y = f(f(y)); - } - return gcd(prd, n); -} - -void factor(ll n, map<ll, ll>& facts) { - if (n == 1) return; - if (isPrime(n)) {facts[n]++; return;} - ll f = rho(n); - factor(n / f, facts); factor(f, facts); -} diff --git a/math/shortModInv.cpp b/math/shortModInv.cpp deleted file mode 100644 index 244bacf..0000000 --- a/math/shortModInv.cpp +++ /dev/null @@ -1,3 +0,0 @@ -ll multInv(ll x, ll m) { // x^{-1} mod m - return 1 < x ? m - multInv(m % x, x) * m / x : 1; -} diff --git a/math/simpson.cpp b/math/simpson.cpp deleted file mode 100644 index a99b911..0000000 --- a/math/simpson.cpp +++ /dev/null @@ -1,12 +0,0 @@ -double f(double x) {return x;} - -double simps(double a, double b) { - return (f(a) + 4.0 * f((a + b) / 2.0) + f(b)) * (b - a) / 6.0; -} - -double integrate(double a, double b) { - double m = (a + b) / 2.0; - double l = simps(a, m), r = simps(m, b), tot = simps(a, b); - if (abs(l + r - tot) < EPS) return tot; - return integrate(a, m) + integrate(m, b); -} diff --git a/math/sqrtModCipolla.cpp b/math/sqrtModCipolla.cpp deleted file mode 100644 index 12bc590..0000000 --- a/math/sqrtModCipolla.cpp +++ /dev/null @@ -1,13 +0,0 @@ -bool isSquare(ll x, ll p){ - return powMod(x, p/2, p) != p-1; -} - -// Teste vorher, ob sqrt(n) mod p existiert!
-ll sqrtMod(ll n, ll p){ - if(n == 0) return 0; - ll r0 = 1, r1 = 0, b0 = 1, b1 = 1, w; - while(isSquare(w=(b0*b0-n+p)%p, p)) b0 = rng()%p; - for(ll e = (p+1)/2; e; e /= 2, tie(b0, b1) = pair((b0*b0 + b1*b1%p*w)%p, 2*b0*b1%p)) - if(e & 1) tie(r0, r1) = pair((r0*b0 + r1*b1%p*w)%p, (r0*b1 + b0*r1)%p); - return r0; -} diff --git a/math/squfof.cpp b/math/squfof.cpp deleted file mode 100644 index 1cb97de..0000000 --- a/math/squfof.cpp +++ /dev/null @@ -1,89 +0,0 @@ -using lll = __int128; - -constexpr lll multipliers[] = {1, 3, 5, 7, - 11, 3*5, 3*7, 3*11, - 5*7, 5*11, 7*11, - 3*5*7, 3*5*11, 3*7*11, - 5*7*11, 3*5*7*11}; - -lll root(lll x) { - lll r = sqrtl(x); - while(r*r < x) r++; - while(r*r > x) r--; - return r; -} - -lll croot(lll x) { - lll r = cbrtl(x); - while(r*r*r < x) r++; - while(r*r*r > x) r--; - return r; -} - -lll squfof(lll N) { - lll s = croot(N); - if (s*s*s == N) return s; - s = root(N); - if (s*s == N) return s; - for (lll k : multipliers) { - lll D = k * N; - lll Po, P, Pprev, q, b, r, i; - Po = Pprev = P = root(D); - lll Qprev = 1; - lll Q = D - Po*Po; - lll L = 2 * root(2 * s); - lll B = 3 * L; - for (i = 2; i < B; i++) { - b = (Po + P) / Q; - P = b*Q - P; - q = Q; - Q = Qprev + b * (Pprev - P); - r = root(Q); - if (!(i & 1) && r*r == Q) break; - Qprev = q; - Pprev = P; - } - if (i >= B) continue; - b = (Po - P) / r; - Pprev = P = b*r + P; - Qprev = r; - Q = (D-Pprev*Pprev)/Qprev; - i = 0; - do { - b = (Po + P) / Q; - Pprev = P; - P = b*Q - P; - q = Q; - Q = Qprev + b * (Pprev - P); - Qprev = q; - i++; - } while(P != Pprev); - r = gcd(N, Qprev); - if (r != 1 && r != N) return r; - } - exit(1);//try fallback to pollard rho -} - -constexpr lll trialLim = 5'000; - -void factor(lll n, map& facts) { - for (lll i = 2; i * i <= n && i <= trialLim; i++) { - while (n % i == 0) { - facts[i]++; - n /= i; - }} - if (n > 1 && n < trialLim * trialLim) { - facts[n]++; - } else { - vector todo = {n}; - while (!todo.empty()) { - lll c = todo.back(); - todo.pop_back(); - if (c == 1) continue; - if (isPrime(c)) { - facts[c]++; - } else { - lll d = squfof(c); - todo.push_back(d); - todo.push_back(c / d); -}}}} diff --git a/math/tables.tex b/math/tables.tex deleted file mode 100644 index 53f3758..0000000 --- a/math/tables.tex +++ /dev/null @@ -1,18 +0,0 @@ -\enlargethispage{0.2cm} -\begin{multicols*}{2} - \input{math/tables/binom} - \vfill - \input{math/tables/composite} - \vfill - \input{math/tables/platonic} - \vfill - \input{math/tables/series} - - \columnbreak - - \input{math/tables/probability} - \vfill - \input{math/tables/stuff} - \vfill - \input{math/tables/nim} -\end{multicols*} diff --git a/math/tables/binom.tex b/math/tables/binom.tex deleted file mode 100644 index 878a6b0..0000000 --- a/math/tables/binom.tex +++ /dev/null @@ -1,28 +0,0 @@ -\begin{tabularx}{\linewidth}{|XXXX|} - \hline - \multicolumn{4}{|c|}{Binomialkoeffizienten} \\ - \hline - \multicolumn{4}{|c|}{ - $\frac{n!}{k!(n - k)!} \hfill=\hfill - \binom{n}{k} \hfill=\hfill - \binom{n}{n - k} \hfill=\hfill - \frac{n}{k}\binom{n - 1}{k - 1} \hfill=\hfill - \frac{n-k+1}{k}\binom{n}{k - 1} \hfill=\hfill - \binom{n - 1}{k} + \binom{n - 1}{k - 1} \hfill=\hfill - (-1)^k \binom{k - n - 1}{k} \hfill\approx\hfill - 2^{n} \cdot \frac{2}{\sqrt{2\pi n}}\cdot\exp\left(-\frac{2(x - \frac{n}{2})^2}{n}\right)$ - } \\ - \grayhline - - $\sum\limits_{k = 0}^n \binom{n}{k} = 2^n$ & - $\sum\limits_{k = 0}^n \binom{k}{m} = \binom{n + 1}{m + 1}$ & - $\sum\limits_{i = 0}^n \binom{n}{i}^2 = \binom{2n}{n}$ & - $\sum\limits_{k = 0}^n\binom{r + k}{k} = 
\binom{r + n + 1}{n}$\\ - - $\binom{n}{m}\binom{m}{k} = \binom{n}{k}\binom{n - k}{m - k}$ & - $\sum\limits_{k = 0}^n \binom{r}{k}\binom{s}{n - k} = \binom{r + s}{n}$ & - \multicolumn{2}{l|}{ - $\sum\limits_{i = 1}^n \binom{n}{i} F_i = F_{2n} \quad F_n = n\text{-th Fib.}$ - }\\ - \hline -\end{tabularx} diff --git a/math/tables/composite.tex b/math/tables/composite.tex deleted file mode 100644 index 8e14b2e..0000000 --- a/math/tables/composite.tex +++ /dev/null @@ -1,27 +0,0 @@ - -\begin{tabularx}{\linewidth}{|r||r|r||r|r|r||C|} - \hline - \multicolumn{7}{|c|}{Important Numbers} \\ - \hline - $10^x$ & Highly Composite & \# Divs & $<$ Prime & $>$ Prime & \# Primes & \\ - \hline - 1 & 6 & 4 & $-3$ & $+1$ & 4 & \\ - 2 & 60 & 12 & $-3$ & $+1$ & 25 & \\ - 3 & 840 & 32 & $-3$ & $+9$ & 168 & \\ - 4 & 7\,560 & 64 & $-27$ & $+7$ & 1\,229 & \\ - 5 & 83\,160 & 128 & $-9$ & $+3$ & 9\,592 & \\ - 6 & 720\,720 & 240 & $-17$ & $+3$ & 78\,498 & \\ - 7 & 8\,648\,640 & 448 & $-9$ & $+19$ & 664\,579 & \\ - 8 & 73\,513\,440 & 768 & $-11$ & $+7$ & 5\,761\,455 & \\ - 9 & 735\,134\,400 & 1\,344 & $-63$ & $+7$ & 50\,847\,534 & \\ - 10 & 6\,983\,776\,800 & 2\,304 & $-33$ & $+19$ & 455\,052\,511 & \\ - 11 & 97\,772\,875\,200 & 4\,032 & $-23$ & $+3$ & 4\,118\,054\,813 & \\ - 12 & 963\,761\,198\,400 & 6\,720 & $-11$ & $+39$ & 37\,607\,912\,018 & \\ - 13 & 9\,316\,358\,251\,200 & 10\,752 & $-29$ & $+37$ & 346\,065\,536\,839 & \\ - 14 & 97\,821\,761\,637\,600 & 17\,280 & $-27$ & $+31$ & 3\,204\,941\,750\,802 & \\ - 15 & 866\,421\,317\,361\,600 & 26\,880 & $-11$ & $+37$ & 29\,844\,570\,422\,669 & \\ - 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & $-63$ & $+61$ & 279\,238\,341\,033\,925 & \\ - 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & $-3$ & $+3$ & 2\,623\,557\,157\,654\,233 & \\ - 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & $-11$ & $+3$ & 24\,739\,954\,287\,740\,860 & \\ - \hline -\end{tabularx} diff --git a/math/tables/nim.tex b/math/tables/nim.tex deleted file mode 100644 index 8490d42..0000000 --- a/math/tables/nim.tex +++ /dev/null @@ -1,96 +0,0 @@ -\begin{tabularx}{\linewidth}{|p{0.37\linewidth}|X|} - \hline - \multicolumn{2}{|c|}{Nim-Spiele (\ding{182} letzter gewinnt (normal), \ding{183} letzter verliert)} \\ - \hline - Beschreibung & - Strategie \\ - \hline - - $M = [\mathit{pile}_i]$\newline - $[x] := \{1, \ldots, x\}$& - $\mathit{SG} = \oplus_{i = 1}^n \mathit{pile}_i$\newline - \ding{182} Nimm von einem Stapel, sodass $\mathit{SG}$ $0$ wird.\newline - \ding{183} Genauso. 
- Außer: Bleiben nur noch Stapel der Größe $1$, erzeuge ungerade Anzahl solcher Stapel.\\ - \hline - - $M = \{a^m \mid m \geq 0\}$ & - $a$ ungerade: $\mathit{SG}_n = n \% 2$\newline - $a$ gerade:\newline - $\mathit{SG}_n = 2$, falls $n \equiv a \bmod (a + 1) $\newline - $\mathit{SG}_n = n \% (a + 1) \% 2$, sonst.\\ - \hline - - $M_{\text{\ding{172}}} = \left[\frac{\mathit{pile}_i}{2}\right]$\newline - $M_{\text{\ding{173}}} = - \left\{\left\lceil\frac{\mathit{pile}_i}{2}\right\rceil,~ - \mathit{pile}_i\right\}$ & - \ding{172} - $\mathit{SG}_{2n} = n$, - $\mathit{SG}_{2n+1} = \mathit{SG}_n$\newline - \ding{173} - $\mathit{SG}_0 = 0$, - $\mathit{SG}_n = [\log_2 n] + 1$ \\ - \hline - - $M_{\text{\ding{172}}} = \text{Teiler von $\mathit{pile}_i$}$\newline - $M_{\text{\ding{173}}} = \text{echte Teiler von $\mathit{pile}_i$}$ & - \ding{172} - $\mathit{SG}_0 = 0$, - $\mathit{SG}_n = \mathit{SG}_{\text{\ding{173},n}} + 1$\newline - \ding{173} - $\mathit{ST}_1 = 0$, - $\mathit{SG}_n = \text{\#Nullen am Ende von $n_{bin}$}$\\ - \hline - - $M_{\text{\ding{172}}} = [k]$\newline - $M_{\text{\ding{173}}} = S$, ($S$ endlich)\newline - $M_{\text{\ding{174}}} = S \cup \{\mathit{pile}_i\}$ & - $\mathit{SG}_{\text{\ding{172}}, n} = n \bmod (k + 1)$\newline - \ding{182} Niederlage bei $\mathit{SG} = 0$\newline - \ding{183} Niederlage bei $\mathit{SG} = 1$\newline - $\mathit{SG}_{\text{\ding{174}}, n} = \mathit{SG}_{\text{\ding{173}}, n} + 1$\\ - \hline - - \multicolumn{2}{|l|}{ - Für jedes endliche $M$ ist $\mathit{SG}$ eines Stapels irgendwann periodisch. - } \\ - \hline - - \textsc{Moore}'s Nim:\newline - Beliebige Zahl von maximal $k$ Stapeln. & - \ding{182} - Schreibe $\mathit{pile}_i$ binär. - Addiere ohne Übertrag zur Basis $k + 1$. - Niederlage, falls Ergebnis gleich 0.\newline - \ding{183} - Wenn alle Stapel $1$ sind: - Niederlage, wenn $n \equiv 1 \bmod (k + 1)$. - Sonst wie in \ding{182}.\\ - \hline - - Staircase Nim:\newline - $n$ Stapel in einer Reihe. - Beliebige Zahl von Stapel $i$ nach Stapel $i-1$. 
& - Niederlage, wenn Nim der ungeraden Spiele verloren ist:\newline - $\oplus_{i = 0}^{(n - 1) / 2} \mathit{pile}_{2i + 1} = 0$\\ - \hline - - \textsc{Lasker}'s Nim:\newline - Zwei mögliche Züge:\newline - 1) Nehme beliebige Zahl.\newline - 2) Teile Stapel in zwei Stapel (ohne Entnahme).& - $\mathit{SG}_n = n$, falls $n \equiv 1,2 \bmod 4$\newline - $\mathit{SG}_n = n + 1$, falls $n \equiv 3 \bmod 4$\newline - $\mathit{SG}_n = n - 1$, falls $n \equiv 0 \bmod 4$\\ - \hline - - \textsc{Kayles}' Nim:\newline - Zwei mögliche Züge:\newline - 1) Nehme beliebige Zahl.\newline - 2) Teile Stapel in zwei Stapel (mit Entnahme).& - Berechne $\mathit{SG}_n$ für kleine $n$ rekursiv.\newline - $n \in [72,83]: \quad 4, 1, 2, 8, 1, 4, 7, 2, 1, 8, 2, 7$\newline - Periode ab $n = 72$ der Länge $12$.\\ - \hline -\end{tabularx} diff --git a/math/tables/numbers.tex b/math/tables/numbers.tex deleted file mode 100644 index 1dc9f38..0000000 --- a/math/tables/numbers.tex +++ /dev/null @@ -1,59 +0,0 @@ -\begin{expandtable} -\begin{tabularx}{\linewidth}{|l|X|} - \hline - \multicolumn{2}{|c|}{Berühmte Zahlen} \\ - \hline - \textsc{Fibonacci} & - $f(0) = 0 \quad - f(1) = 1 \quad - f(n+2) = f(n+1) + f(n)$ \\ - \grayhline - - \textsc{Catalan} & - $C_0 = 1 \qquad - C_n = \sum\limits_{k = 0}^{n - 1} C_kC_{n - 1 - k} = - \frac{1}{n + 1}\binom{2n}{n} = \frac{2(2n - 1)}{n+1} \cdot C_{n-1}$ \\ - \grayhline - - \textsc{Euler} I & - $\eulerI{n}{0} = \eulerI{n}{n-1} = 1 \qquad - \eulerI{n}{k} = (k+1) \eulerI{n-1}{k} + (n-k) \eulerI{n-1}{k-1} $ \\ - \grayhline - - \textsc{Euler} II & - $\eulerII{n}{0} = 1 \quad - \eulerII{n}{n} = 0 \quad$\\ - & $\eulerII{n}{k} = (k+1) \eulerII{n-1}{k} + (2n-k-1) \eulerII{n-1}{k-1}$ \\ - \grayhline - - \textsc{Stirling} I & - $\stirlingI{0}{0} = 1 \qquad - \stirlingI{n}{0} = \stirlingI{0}{n} = 0 \qquad - \stirlingI{n}{k} = \stirlingI{n-1}{k-1} + (n-1) \stirlingI{n-1}{k}$ \\ - \grayhline - - \textsc{Stirling} II & - $\stirlingII{n}{1} = \stirlingII{n}{n} = 1 \qquad - \stirlingII{n}{k} = k \stirlingII{n-1}{k} + \stirlingII{n-1}{k-1} = - \frac{1}{k!} \sum\limits_{j=0}^{k} (-1)^{k-j}\binom{k}{j}j^n$\\ - \grayhline - - \textsc{Bell} & - $B_1 = 1 \qquad - B_n = \sum\limits_{k = 0}^{n - 1} B_k\binom{n-1}{k} - = \sum\limits_{k = 0}^{n}\stirlingII{n}{k}$\\ - \grayhline - - \textsc{Partitions} & - $p(0,0) = 1 \quad - p(n,k) = 0 \text{ für } k > n \text{ oder } n \leq 0 \text{ oder } k \leq 0$ \\ - & $p(n,k) = p(n-k,k) + p(n-1,k-1)$\\ - \grayhline - - \textsc{Partitions} & - $f(0) = 1 \quad f(n) = 0~(n < 0)$ \\ - & $f(n)=\sum\limits_{k=1}^\infty(-1)^{k-1}f(n - \frac{k(3k+1)}{2})+\sum\limits_{k=1}^\infty(-1)^{k-1}f(n - \frac{k(3k-1)}{2})$\\ - - \hline -\end{tabularx} -\end{expandtable} diff --git a/math/tables/platonic.tex b/math/tables/platonic.tex deleted file mode 100644 index f4ee554..0000000 --- a/math/tables/platonic.tex +++ /dev/null @@ -1,39 +0,0 @@ -\begin{tabularx}{\linewidth}{|X|CCCX|} - \hline - \multicolumn{5}{|c|}{Platonische Körper} \\ - \hline - Übersicht & Seiten & Ecken & Kanten & dual zu \\ - \hline - Tetraeder & 4 & 4 & 6 & Tetraeder \\ - Würfel/Hexaeder & 6 & 8 & 12 & Oktaeder \\ - Oktaeder & 8 & 6 & 12 & Würfel/Hexaeder\\ - Dodekaeder & 12 & 20 & 30 & Ikosaeder \\ - Ikosaeder & 20 & 12 & 30 & Dodekaeder \\ - \hline - \multicolumn{5}{|c|}{Färbungen mit maximal $n$ Farben (bis auf Isomorphie)} \\ - \hline - \multicolumn{3}{|l}{Ecken vom Oktaeder/Seiten vom Würfel} & - \multicolumn{2}{l|}{$(n^6 + 3n^4 + 12n^3 + 8n^2)/24$} \\ - - \multicolumn{3}{|l}{Ecken vom Würfel/Seiten vom Oktaeder} & - 
\multicolumn{2}{l|}{$(n^8 + 17n^4 + 6n^2)/24$} \\ - - \multicolumn{3}{|l}{Kanten vom Würfel/Oktaeder} & - \multicolumn{2}{l|}{$(n^{12} + 6n^7 + 3n^6 + 8n^4 + 6n^3)/24$} \\ - - \multicolumn{3}{|l}{Ecken/Seiten vom Tetraeder} & - \multicolumn{2}{l|}{$(n^4 + 11n^2)/12$} \\ - - \multicolumn{3}{|l}{Kanten vom Tetraeder} & - \multicolumn{2}{l|}{$(n^6 + 3n^4 + 8n^2)/12$} \\ - - \multicolumn{3}{|l}{Ecken vom Ikosaeder/Seiten vom Dodekaeder} & - \multicolumn{2}{l|}{$(n^{12} + 15n^6 + 44n^4)/60$} \\ - - \multicolumn{3}{|l}{Ecken vom Dodekaeder/Seiten vom Ikosaeder} & - \multicolumn{2}{l|}{$(n^{20} + 15n^{10} + 20n^8 + 24n^4)/60$} \\ - - \multicolumn{3}{|l}{Kanten vom Dodekaeder/Ikosaeder (evtl. falsch)} & - \multicolumn{2}{l|}{$(n^{30} + 15n^{16} + 20n^{10} + 24n^6)/60$} \\ - \hline -\end{tabularx} diff --git a/math/tables/probability.tex b/math/tables/probability.tex deleted file mode 100644 index f265d10..0000000 --- a/math/tables/probability.tex +++ /dev/null @@ -1,27 +0,0 @@ -\begin{tabularx}{\linewidth}{|LICIR|} - \hline - \multicolumn{3}{|c|}{ - Wahrscheinlichkeitstheorie ($A,B$ Ereignisse und $X,Y$ Variablen) - } \\ - \hline - $\E(X + Y) = \E(X) + \E(Y)$ & - $\E(\alpha X) = \alpha \E(X)$ & - $X, Y$ unabh. $\Leftrightarrow \E(XY) = \E(X) \cdot \E(Y)$\\ - - $\Pr[A \vert B] = \frac{\Pr[A \land B]}{\Pr[B]}$ & - $A, B$ disj. $\Leftrightarrow \Pr[A \land B] = \Pr[A] \cdot \Pr[B]$ & - $\Pr[A \lor B] = \Pr[A] + \Pr[B] - \Pr[A \land B]$ \\ - \hline -\end{tabularx} -\vfill -\begin{tabularx}{\linewidth}{|Xlr|lrX|} - \hline - \multicolumn{6}{|c|}{\textsc{Bertrand}'s Ballot Theorem (Kandidaten $A$ und $B$, $k \in \mathbb{N}$)} \\ - \hline - & $\#A > k\#B$ & $Pr = \frac{a - kb}{a + b}$ & - $\#B - \#A \leq k$ & $Pr = 1 - \frac{a!b!}{(a + k + 1)!(b - k - 1)!}$ & \\ - - & $\#A \geq k\#B$ & $Pr = \frac{a + 1 - kb}{a + 1}$ & - $\#A \geq \#B + k$ & $Num = \frac{a - k + 1 - b}{a - k + 1} \binom{a + b - k}{b}$ & \\ - \hline -\end{tabularx} diff --git a/math/tables/series.tex b/math/tables/series.tex deleted file mode 100644 index 3042781..0000000 --- a/math/tables/series.tex +++ /dev/null @@ -1,33 +0,0 @@ -\begin{tabularx}{\linewidth}{|XIXIXIX|} - \hline - \multicolumn{4}{|c|}{Reihen} \\ - \hline - $\sum\limits_{i = 1}^n i = \frac{n(n+1)}{2}$ & - $\sum\limits_{i = 1}^n i^2 = \frac{n(n + 1)(2n + 1)}{6}$ & - $\sum\limits_{i = 1}^n i^3 = \frac{n^2 (n + 1)^2}{4}$ & - $H_n = \sum\limits_{i = 1}^n \frac{1}{i}$ \\ - \grayhline - - $\sum\limits_{i = 0}^n c^i = \frac{c^{n + 1} - 1}{c - 1} \quad c \neq 1$ & - $\sum\limits_{i = 0}^\infty c^i = \frac{1}{1 - c} \quad \vert c \vert < 1$ & - $\sum\limits_{i = 1}^\infty c^i = \frac{c}{1 - c} \quad \vert c \vert < 1$ & - $\sum\limits_{i = 0}^\infty ic^i = \frac{c}{(1 - c)^2} \quad \vert c \vert < 1$ \\ - \grayhline - - \multicolumn{2}{|lI}{ - $\sum\limits_{i = 0}^n ic^i = \frac{nc^{n + 2} - (n + 1)c^{n + 1} + c}{(c - 1)^2} \quad c \neq 1$ - } & - \multicolumn{2}{l|}{ - $\sum\limits_{i = 1}^n iH_i = \frac{n(n + 1)}{2}H_n - \frac{n(n - 1)}{4}$ - } \\ - \grayhline - - \multicolumn{2}{|lI}{ - $\sum\limits_{i = 1}^n H_i = (n + 1)H_n - n$ - } & - \multicolumn{2}{l|}{ - $\sum\limits_{i = 1}^n \binom{i}{m}H_i = - \binom{n + 1}{m + 1} \left(H_{n + 1} - \frac{1}{m + 1}\right)$ - } \\ - \hline -\end{tabularx} diff --git a/math/tables/stuff.tex b/math/tables/stuff.tex deleted file mode 100644 index 5b5093e..0000000 --- a/math/tables/stuff.tex +++ /dev/null @@ -1,32 +0,0 @@ -\begin{tabularx}{\linewidth}{|ll|} - \hline - \multicolumn{2}{|C|}{Verschiedenes} \\ - \hline - Türme von Hanoi, 
minimale Schrittzahl: & - $T_n = 2^n - 1$ \\ - - \#Regionen zwischen $n$ Geraden & - $\frac{n\left(n + 1\right)}{2} + 1$ \\ - - \#abgeschlossene Regionen zwischen $n$ Geraden & - $\frac{n^2 - 3n + 2}{2}$ \\ - - \#markierte, gewurzelte Bäume & - $n^{n-1}$ \\ - - \#markierte, nicht gewurzelte Bäume & - $n^{n-2}$ \\ - - \#Wälder mit $k$ gewurzelten Bäumen & - $\frac{k}{n}\binom{n}{k}n^{n-k}$ \\ - - \#Wälder mit $k$ gewurzelten Bäumen mit vorgegebenen Wurzelknoten & - $\frac{k}{n}n^{n-k}$ \\ - - Derangements & - $!n = (n - 1)(!(n - 1) + !(n - 2)) = \left\lfloor\frac{n!}{e} + \frac{1}{2}\right\rfloor$ \\ - & - $\lim\limits_{n \to \infty} \frac{!n}{n!} = \frac{1}{e}$ \\ - \hline -\end{tabularx} - diff --git a/math/tables/twelvefold.tex b/math/tables/twelvefold.tex deleted file mode 100644 index 18d3955..0000000 --- a/math/tables/twelvefold.tex +++ /dev/null @@ -1,32 +0,0 @@ -\begin{expandtable} -\begin{tabularx}{\linewidth}{|C|CICICIC|} - \hline - Bälle & identisch & verschieden & identisch & verschieden \\ - Boxen & identisch & identisch & verschieden & verschieden \\ - \hline - -- & - $p_k(n + k)$ & - $\sum\limits_{i = 0}^k \stirlingII{n}{i}$ & - $\binom{n + k - 1}{k - 1}$ & - $k^n$ \\ - \grayhline - - \makecell{Bälle pro\\Box $\geq 1$} & - $p_k(n)$ & - $\stirlingII{n}{k}$ & - $\binom{n - 1}{k - 1}$ & - $k! \stirlingII{n}{k}$ \\ - \grayhline - - \makecell{Bälle pro\\Box $\leq 1$} & - $[n \leq k]$ & - $[n \leq k]$ & - $\binom{k}{n}$ & - $n! \binom{k}{n}$ \\ - \hline - \multicolumn{5}{|l|}{ - $[\text{Bedingung}]$: \code{return Bedingung ? 1 : 0;} - } \\ - \hline -\end{tabularx} -\end{expandtable} diff --git a/math/transforms/andTransform.cpp b/math/transforms/andTransform.cpp deleted file mode 100644 index 1fd9f5c..0000000 --- a/math/transforms/andTransform.cpp +++ /dev/null @@ -1,8 +0,0 @@ -void fft(vector<ll>& a, bool inv = false) { - int n = sz(a); - for (int s = 1; s < n; s *= 2) { - for (int i = 0; i < n; i += 2 * s) { - for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; - tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); -}}}} diff --git a/math/transforms/bitwiseTransforms.cpp b/math/transforms/bitwiseTransforms.cpp deleted file mode 100644 index 28561da..0000000 --- a/math/transforms/bitwiseTransforms.cpp +++ /dev/null @@ -1,12 +0,0 @@ -void bitwiseConv(vector<ll>& a, bool inv = false) { - int n = sz(a); - for (int s = 1; s < n; s *= 2) { - for (int i = 0; i < n; i += 2 * s) { - for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; - tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); // AND - //tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u); //OR - //tie(u, v) = pair(u + v, u - v); // XOR - }}} - //if (inv) for (ll& x : a) x /= n; // XOR (careful with MOD) -} diff --git a/math/transforms/fft.cpp b/math/transforms/fft.cpp deleted file mode 100644 index 2bd95b2..0000000 --- a/math/transforms/fft.cpp +++ /dev/null @@ -1,23 +0,0 @@ -using cplx = complex<double>; - -void fft(vector<cplx>& a, bool inv = false) { - int n = sz(a); - for (int i = 0, j = 1; j < n - 1; ++j) { - for (int k = n >> 1; k > (i ^= k); k >>= 1); - if (j < i) swap(a[i], a[j]); - } - static vector<cplx> ws(2, 1); - for (static int k = 2; k < n; k *= 2) { - ws.resize(n); - cplx w = polar(1.0, acos(-1.0) / k); - for (int i=k; i<2*k; i++) ws[i] = ws[i/2] * (i % 2 ? w : 1); - } - for (int s = 1; s < n; s *= 2) { - for (int j = 0; j < n; j += 2 * s) { - for (int k = 0; k < s; k++) { - cplx u = a[j + k], t = a[j + s + k]; - t *= (inv ?
conj(ws[s + k]) : ws[s + k]); - a[j + k] = u + t; - a[j + s + k] = u - t; - if (inv) a[j + k] /= 2, a[j + s + k] /= 2; -}}}} diff --git a/math/transforms/fftMul.cpp b/math/transforms/fftMul.cpp deleted file mode 100644 index eac343c..0000000 --- a/math/transforms/fftMul.cpp +++ /dev/null @@ -1,14 +0,0 @@ -vector mul(vector& a, vector& b) { - vector c(sz(a)), d(sz(a)); - for (int i = 0; i < sz(b); i++) { - c[i] = {real(a[i]), real(b[i])}; - } - fft(c); - for (int i = 0; i < sz(b); i++) { - int j = (sz(a) - i) % sz(a); - cplx x = (c[i] + conj(c[j])) / cplx{2, 0}; //fft(a)[i]; - cplx y = (c[i] - conj(c[j])) / cplx{0, 2}; //fft(b)[i]; - d[i] = x * y; - } - return fft(d, true); -} diff --git a/math/transforms/multiplyBitwise.cpp b/math/transforms/multiplyBitwise.cpp deleted file mode 100644 index 0fa671c..0000000 --- a/math/transforms/multiplyBitwise.cpp +++ /dev/null @@ -1,8 +0,0 @@ -vector mul(vector a, vector b) { - int n = 1 << (__lg(max(sz(a), sz(b)) - 1) + 1); - a.resize(n), b.resize(n); - bitwiseConv(a), bitwiseConv(b); - for (int i=0; i mul(vector& a, vector& b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); - vector a2(all(a)), b2(all(b)); - a2.resize(n), b2.resize(n); - fft(a2), fft(b2); - for (int i=0; i ans(n); - for (int i=0; i mul(vector a, vector b) { - int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1); - a.resize(n), b.resize(n); - ntt(a), ntt(b); - for (int i=0; i& a, bool inv = false) { - int n = sz(a); - auto b = a; - ll r = inv ? powMod(root, mod - 2, mod) : root; - - for (int s = n / 2; s > 0; s /= 2) { - ll ws = powMod(r, (mod - 1) / (n / s), mod), w = 1; - for (int j = 0; j < n / 2; j += s) { - for (int k = j; k < j + s; k++) { - ll u = a[j + k], t = a[j + s + k] * w % mod; - b[k] = (u + t) % mod; - b[n/2 + k] = (u - t + mod) % mod; - } - w = w * ws % mod; - } - swap(a, b); - } - if (inv) { - ll div = powMod(n, mod - 2, mod); - for (auto& x : a) x = x * div % mod; -}} diff --git a/math/transforms/orTransform.cpp b/math/transforms/orTransform.cpp deleted file mode 100644 index eb1da44..0000000 --- a/math/transforms/orTransform.cpp +++ /dev/null @@ -1,8 +0,0 @@ -void fft(vector& a, bool inv = false) { - int n = sz(a); - for (int s = 1; s < n; s *= 2) { - for (int i = 0; i < n; i += 2 * s) { - for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; - tie(u, v) = inv ? 
pair(v, u - v) : pair(u + v, u); -}}}} diff --git a/math/transforms/seriesOperations.cpp b/math/transforms/seriesOperations.cpp deleted file mode 100644 index 4743674..0000000 --- a/math/transforms/seriesOperations.cpp +++ /dev/null @@ -1,56 +0,0 @@ -vector poly_inv(const vector& a, int n) { - vector q = {powMod(a[0], mod-2, mod)}; - for (int len = 1; len < n; len *= 2){ - vector a2 = a, q2 = q; - a2.resize(2*len), q2.resize(2*len); - ntt(q2); - for (int j : {0, 1}) { - ntt(a2); - for (int i = 0; i < 2*len; i++) a2[i] = a2[i]*q2[i] % mod; - ntt(a2, true); - for (int i = 0; i < len; i++) a2[i] = 0; - } - for (int i = len; i < min(n, 2*len); i++) { - q.push_back((mod - a2[i]) % mod); - }} - return q; -} - -vector poly_deriv(vector a) { - for (int i = 1; i < sz(a); i++) - a[i-1] = a[i] * i % mod; - a.pop_back(); - return a; -} - -vector poly_integr(vector a) { - if (a.empty()) return {0}; - a.push_back(a.back() * powMod(sz(a), mod-2, mod) % mod); - for (int i = sz(a)-2; i > 0; i--) - a[i] = a[i-1] * powMod(i, mod-2, mod) % mod; - a[0] = 0; - return a; -} - -vector poly_log(vector a, int n) { - a = mul(poly_deriv(a), poly_inv(a, n)); - a.resize(n-1); - a = poly_integr(a); - return a; -} - -vector poly_exp(vector a, int n) { - vector q = {1}; - for (int len = 1; len < n; len *= 2) { - vector p = poly_log(q, 2*len); - for (int i = 0; i < 2*len; i++) - p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod; - vector q2 = q; - q2.resize(2*len); - ntt(p), ntt(q2); - for (int i = 0; i < 2*len; i++) p[i] = p[i] * q2[i] % mod; - ntt(p, true); - for (int i = len; i < min(n, 2*len); i++) q.push_back(p[i]); - } - return q; -} diff --git a/math/transforms/xorTransform.cpp b/math/transforms/xorTransform.cpp deleted file mode 100644 index f9d1d82..0000000 --- a/math/transforms/xorTransform.cpp +++ /dev/null @@ -1,10 +0,0 @@ -void fft(vector& a, bool inv = false) { - int n = sz(a); - for (int s = 1; s < n; s *= 2) { - for (int i = 0; i < n; i += 2 * s) { - for (int j = i; j < i + s; j++) { - ll& u = a[j], &v = a[j + s]; - tie(u, v) = pair(u + v, u - v); - }}} - if (inv) for (ll& x : a) x /= n; -} diff --git a/other/bitOps.cpp b/other/bitOps.cpp deleted file mode 100644 index 8079305..0000000 --- a/other/bitOps.cpp +++ /dev/null @@ -1,18 +0,0 @@ -// Iteriert über alle Teilmengen einer Bitmaske -// (außer der leeren Menge). -for (int subset = bitmask; subset > 0; - subset = (subset - 1) & bitmask) - -// Zählt Anzahl der gesetzten Bits. -int numberOfSetBits(int i) { - i = i - ((i >> 1) & 0x5555'5555); - i = (i & 0x3333'3333) + ((i >> 2) & 0x3333'3333); - return (((i + (i >> 4)) & 0x0F0F'0F0F) * 0x0101'0101) >> 24; -} - -// Nächste Permutation in Bitmaske -// (z.B. 00111 => 01011 => 01101 => ...) 
-ll nextPerm(ll v) { - ll t = v | (v - 1); - return (t+1) | (((~t & -~t) - 1) >> (__builtin_ctzll(v) + 1)); -} diff --git a/other/compiletime.cpp b/other/compiletime.cpp deleted file mode 100644 index b71f83b..0000000 --- a/other/compiletime.cpp +++ /dev/null @@ -1,7 +0,0 @@ -template -struct Table { - int data[N]; - constexpr Table() : data {} { - for (int i = 0; i < N; i++) data[i] = i; -}}; -constexpr Table<100'000> precalculated; diff --git a/other/divideAndConquer.cpp b/other/divideAndConquer.cpp deleted file mode 100644 index 92ec0ef..0000000 --- a/other/divideAndConquer.cpp +++ /dev/null @@ -1,27 +0,0 @@ -vector> dp; -vector> C; - -void rec(int i, int j0, int j1, int k0, int k1) { - if (j1 < j0) return; - int jmid = (j0 + j1) / 2; - - dp[i][jmid] = inf; - int bestk = k0; - for (int k = k0; k < min(jmid, k1 + 1); ++k) { - if (dp[i - 1][k] + C[k + 1][jmid] < dp[i][jmid]) { - dp[i][jmid] = dp[i - 1][k] + C[k + 1][jmid]; - bestk = k; - }} - - rec(i, j0, jmid - 1, k0, bestk); - rec(i, jmid + 1, j1, bestk, k1); -} - -ll calc(int n, int k) { - dp = vector>(k, vector(n, inf)); - for (int i = 0; i < n; i++) dp[0][i] = C[0][i]; - for (int i = 1; i < k; i++) { - rec(i, 0, n - 1, 0, n - 1); - } - return dp[k - 1][n - 1]; -} diff --git a/other/fastIO.cpp b/other/fastIO.cpp deleted file mode 100644 index 63f9ede..0000000 --- a/other/fastIO.cpp +++ /dev/null @@ -1,24 +0,0 @@ -void fastscan(int& number) { - bool negative = false; - register int c; - number = 0; - c = getchar(); - while(c != '-' && (c < '0' || c > '9')) c = getchar(); - if (c == '-') negative = true, c = getchar(); - for (; c >= '0' && c <= '9'; c = getchar()) number = number * 10 + c - '0'; - if (negative) number *= -1; -} - -void printPositive(int n) { - if (n == 0) return; - printPositive(n / 10); - putchar(n % 10 + '0'); -} - -void fastprint(int n) { - if(n == 0) {putchar('0'); return;} - if (n < 0) { - putchar('-'); - printPositive(-n); - } else printPositive(n); -} diff --git a/other/josephus2.cpp b/other/josephus2.cpp deleted file mode 100644 index 5086e13..0000000 --- a/other/josephus2.cpp +++ /dev/null @@ -1,8 +0,0 @@ -int rotateLeft(int n) { // Der letzte Überlebende, 1-basiert. - for (int i = 31; i >= 0; i--) { - if (n & (1 << i)) { - n &= ~(1 << i); - break; - }} - n <<= 1; n++; return n; -} diff --git a/other/josephusK.cpp b/other/josephusK.cpp deleted file mode 100644 index 5025f89..0000000 --- a/other/josephusK.cpp +++ /dev/null @@ -1,5 +0,0 @@ -// Der letzte Überlebende, 0-basiert. -int josephus(int n, int k) { - if (n == 1) return 0; - return (josephus(n - 1, k) + k) % n; -} diff --git a/other/knuth.cpp b/other/knuth.cpp deleted file mode 100644 index f619f82..0000000 --- a/other/knuth.cpp +++ /dev/null @@ -1,15 +0,0 @@ -ll calc(int n, int k, const vector> &C) { - vector> dp(k, vector(n, inf)); - vector> opt(k, vector(n + 1, n - 1)); - - for (int i = 0; i < n; i++) dp[0][i] = C[0][i]; - for (int i = 1; i < k; i++) { - for (int j = n - 1; j >= 0; --j) { - opt[i][j] = i == 1 ? 0 : opt[i - 1][j]; - for (int k = opt[i][j]; k <= min(opt[i][j+1], j-1); k++) { - if (dp[i][j] <= dp[i - 1][k] + C[k + 1][j]) continue; - dp[i][j] = dp[i - 1][k] + C[k + 1][j]; - opt[i][j] = k; - }}} - return dp[k - 1][n - 1]; -} diff --git a/other/other.tex b/other/other.tex deleted file mode 100644 index 38434a5..0000000 --- a/other/other.tex +++ /dev/null @@ -1,312 +0,0 @@ -\section{Sonstiges} - -\begin{algorithm}{Compiletime} - \begin{itemize} - \item überprüfen ob Compilezeit Berechnungen erlaubt sind! 
- \item braucht \code{c++14} oder höher! - \end{itemize} - \sourcecode{other/compiletime.cpp} -\end{algorithm} - -\begin{algorithm}{Timed} - Kann benutzt werden, um randomisierte Algorithmen so lange wie möglich laufen zu lassen. - \sourcecode{other/timed.cpp} -\end{algorithm} - -\begin{algorithm}{Bit Operations} - \begin{expandtable} - \begin{tabularx}{\linewidth}{|Ll|} - \hline - Bit an Position j lesen & \code{(x & (1 << j)) != 0} \\ - Bit an Position j setzen & \code{x |= (1 << j)} \\ - Bit an Position j löschen & \code{x &= ~(1 << j)} \\ - Bit an Position j flippen & \code{x ^= (1 << j)} \\ - Anzahl an führenden Nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\ - Anzahl an schließenden Nullen ($x \neq 0$) & \code{__builtin_ctzll(x)} \\ - Anzahl an \code{1}-Bits & \code{__builtin_popcountll(x)} \\ - $i$-te Zahl eines Graycodes & \code{i ^ (i >> 1)} \\ - \hline - \end{tabularx}\\ - \end{expandtable} - \sourcecode{other/bitOps.cpp} -\end{algorithm} - -\begin{algorithm}{Overflow-sichere arithmetische Operationen} - Gibt zurück, ob es einen Overflow gab. Wenn nicht, enthält \code{c} das Ergebnis. - \begin{expandtable} - \begin{tabularx}{\linewidth}{|lR|} - \hline - Addition & \code{__builtin_saddll_overflow(a, b, &c)} \\ - Subtraktion & \code{__builtin_ssubll_overflow(a, b, &c)} \\ - Multiplikation & \code{__builtin_smulll_overflow(a, b, &c)} \\ - \hline - \end{tabularx} - \end{expandtable} -\end{algorithm} - -\begin{algorithm}{Pragmas} - \sourcecode{other/pragmas.cpp} -\end{algorithm} - -\begin{algorithm}{DP Optimizations} - Aufgabe: Partitioniere Array in genau $k$ zusammenhängende Teile mit minimalen Kosten: - $dp[i][j] = \min\limits_{k < j}\big(dp[i-1][k] + C[k+1][j]\big)$ - \sourcecode{other/divideAndConquer.cpp} - \sourcecode{other/knuth.cpp} -\end{algorithm} - -\subsection{Nützliches} -\begin{itemize} - \item \textbf{Pythagoreische Tripel:} Wenn $m>n>0,~k>0$ und $m\not\equiv n \bmod 2$, dann beschreibt diese Formel alle Pythagoreischen Tripel eindeutig: - \[k~\cdot~\Big(~a=m^2-n^2,\quad b=2mn,\quad c=m^2+n^2~\Big)\] - - \item \textbf{Centroids of a Tree:} - Ein \emph{Centroid} ist ein Knoten, der einen Baum in Komponenten der maximalen Größe $\frac{\abs{V}}{2}$ splittet. - Es kann $2$ Centroids geben! - - \item \textbf{Centroid Decomposition:} - Wähle zufälligen Knoten und mache DFS. - Verschiebe ausgewählten Knoten in Richtung des größten Teilbaums, bis Centroid gefunden. Entferne Knoten, mache rekursiv in Teilbäumen weiter. Laufzeit:~\runtime{\abs{V} \cdot \log(\abs{V})}. - \item \textbf{Gregorian Calendar:} Der Anfangstag des Jahres ist alle $400$ Jahre gleich. - - \item \textbf{Pivotsuche und Rekursion auf linkem und rechtem Teilarray:} - Suche gleichzeitig von links und rechts nach Pivot, um Worst Case von - $\runtime{n^2}$ zu $\runtime{n\log n}$ zu verbessern. - - \item \textbf{\textsc{Mo}'s Algorithm:} - SQRT-Decomposition auf $n$ Intervall-Queries $[l,r]$. - Gruppiere Queries in $\sqrt{n}$ Blöcke nach linker Grenze $l$. - Sortiere nach Block und bei gleichem Block nach rechter Grenze $r$. - Beantworte Queries offline durch schrittweise Vergrößern/Verkleinern des aktuellen Intervalls. - Laufzeit:~\runtime{n\cdot\sqrt{n}}. - (Anzahl der Blöcke als Konstante in Code schreiben.) - - \item \textbf{SQRT Techniques:} - \begin{itemize} - \item Aufteilen in \emph{leichte} (Wert $\leq\sqrt{x}$) und \emph{schwere} (höchstens $\sqrt{x}$ viele) Objekte. - \item Datenstruktur in Blöcke fester Größe (z.B. 256 oder 512) aufteilen. - \item Datenstruktur nach fester Anzahl Updates komplett neu bauen. - \item Wenn die Summe über $x_i$ durch $X$ beschränkt ist, dann gibt es nur $\sqrt{2X}$ verschiedene Werte von $x_i$ (z.B. Längen von Strings).
- \item Wenn $w\cdot h$ durch $X$ beschränkt ist, dann ist $\min(w,h)\leq\sqrt{X}$. - \end{itemize} - - \item \textbf{Partition:} - Gegeben Gewichte $w_0+w_1+\cdots+w_k=W$, existiert eine Teilmenge mit Gewicht $x$? - Drei gleiche Gewichte $w$ können zu $w$ und $2w$ kombiniert werden ohne die Lösung zu ändern $\Rightarrow$ nur $2\sqrt{W}$ unterschiedliche Gewichte. - Mit bitsets daher selbst für $10^5$ lösbar. -\end{itemize} - -\subsection{Tipps \& Tricks} - -\begin{itemize} - \item \textbf{Run Time Error:} - \begin{itemize} - \item Stack Overflow? Evtl. rekursive Tiefensuche auf langem Pfad? - \item Array-Grenzen überprüfen. Indizierung bei $0$ oder bei $1$ beginnen? - \item Abbruchbedingung bei Rekursion? - \item Evtl. Memory Limit Exceeded? Mit \code{/usr/bin/time -v} erhält man den maximalen Speicherverbrauch bei der Ausführung (Maximum resident set size). - \end{itemize} - - \item \textbf{Strings:} - \begin{itemize} - \item Soll \codeSafe{"aa"} kleiner als \codeSafe{"z"} sein oder nicht? - \item bit \code{0x20} beeinflusst Groß-/Kleinschreibung. - \end{itemize} - - \item \textbf{Zeilenbasierte Eingabe}: - \begin{itemize} - \item \code{getline(cin, str)} liest Zeile ein. - \item Wenn vorher \code{cin >> ...} benutzt, lese letztes \code{\\n} mit \code{getline(cin, x)}. - \end{itemize} - - \item \textbf{Gleitkommazahlen:} - \begin{itemize} - \item \code{NaN}? Evtl. ungültige Werte für mathematische Funktionen, z.B. \mbox{\code{acos(1.00000000000001)}}? - \item Falsches Runden bei negativen Zahlen? Abschneiden $\neq$ Abrunden! - \item genügend Präzision oder Output in wissenschaftlicher Notation (\code{1e-25})? - \item Kann \code{-0.000} ausgegeben werden? - \end{itemize} - - \item \textbf{Wrong Answer:} - \begin{itemize} - \item Lies Aufgabe erneut. Sorgfältig! - \item Mehrere Testfälle in einer Datei? Probiere gleichen Testcase mehrfach hintereinander. - \item Integer Overflow? Teste maximale Eingabegrößen und mache Überschlagsrechnung. - \item Ausgabeformat im 'unmöglich'-Fall überprüfen. - \item Ist das Ergebnis modulo einem Wert? - \item Integer Division rundet zur $0$ $\neq$ abrunden. - \item Eingabegrößen überprüfen. Sonderfälle ausprobieren. - \begin{itemize} - \item $n = 0$, $n = -1$, $n = 1$, $n = 2^{31}-1$, $n = -2^{31}$ - \item $n$ gerade/ungerade - \item Graph ist leer/enthält nur einen Knoten. - \item Liste ist leer/enthält nur ein Element. - \item Graph ist Multigraph (enthält Schleifen/Mehrfachkanten). - \item Sind Kanten gerichtet/ungerichtet? - \item Kolineare Punkte existieren. - \item Polygon ist konkav/selbstschneidend. - \end{itemize} - \item Bei DP/Rekursion: Stimmt Basisfall? - \item Unsicher bei benutzten STL-Funktionen? 
- \end{itemize} -\end{itemize} diff --git a/other/pbs.cpp b/other/pbs.cpp deleted file mode 100644 index 7cb60e5..0000000 --- a/other/pbs.cpp +++ /dev/null @@ -1,19 +0,0 @@ -// Q = # of queries, bucket sort is sometimes faster -vector low(Q, 0), high(Q, MAX_OPERATIONS); -while (true) { - vector> focus; - for (int i = 0; i < Q; i++) if (low[i] < high[i]) { - focus.emplace_back((low[i] + high[i]) / 2, i); - } - if (focus.empty()) break; - sort(all(focus)); - - // reset simulation - for (int step = 0; auto [mid, i] : focus) { - while (step <= mid) { - // simulation step - step++; - } - if (/* requirement already fulfilled */) high[i] = mid; - else low[i] = mid + 1; -}} // answer in low (and high) diff --git a/other/pragmas.cpp b/other/pragmas.cpp deleted file mode 100644 index a39c850..0000000 --- a/other/pragmas.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#pragma GCC optimize("Ofast") -#pragma GCC optimize ("unroll-loops") -#pragma GCC target("sse,sse2,sse3,ssse3,sse4," - "popcnt,abm,mmx,avx,tune=native") -#pragma GCC target("fpmath=sse,sse2") // no excess precision -#pragma GCC target("fpmath=387") // force excess precision diff --git a/other/sos.cpp b/other/sos.cpp deleted file mode 100644 index 01bc44c..0000000 --- a/other/sos.cpp +++ /dev/null @@ -1,6 +0,0 @@ -vector res(in); -for (int i = 1; i < sz(res); i *= 2) { - for (int mask = 0; mask < sz(res); mask++){ - if (mask & i) { - res[mask] += res[mask ^ i]; -}}} diff --git a/other/split.cpp b/other/split.cpp deleted file mode 100644 index 5e3966c..0000000 --- a/other/split.cpp +++ /dev/null @@ -1,10 +0,0 @@ -// Zerlegt s anhand aller Zeichen in delim. -vector split(string &s, string delim) { - vector result; char *token; - token = strtok((char*)s.c_str(), (char*)delim.c_str()); - while (token != NULL) { - result.push_back(string(token)); - token = strtok(NULL, (char*)delim.c_str()); - } - return result; -} diff --git a/other/stress.sh b/other/stress.sh deleted file mode 100644 index d264c2a..0000000 --- a/other/stress.sh +++ /dev/null @@ -1,7 +0,0 @@ -for i in {1..1000}; do - printf "\r$i" - python3 gen.py > input # generate test with gen.py - ./a.out < input > out # execute ./a.out - ./b.out < input > out2 # execute ./b.out - diff out out2 || break -done diff --git a/other/stuff.cpp b/other/stuff.cpp deleted file mode 100644 index 41543ad..0000000 --- a/other/stuff.cpp +++ /dev/null @@ -1,29 +0,0 @@ -// Alles-Header. -#include - -// Setzt deutsche Tastaturlayout / toggle mit alt + space -setxkbmap de -setxkbmap de,us -option grp:alt_space_toggle - -// Schnelle Ein-/Ausgabe mit cin/cout. -cin.tie(nullptr)->ios::sync_with_stdio(false); - -// Set mit eigener Sortierfunktion. -set set1(comp); - -// STL-Debugging, Compiler flags. --D_GLIBCXX_DEBUG -#define _GLIBCXX_DEBUG - -// 128-Bit Integer/Float. Muss zum Einlesen/Ausgeben -// in einen int oder long long gecastet werden. 
-__int128, __float128 - -// float mit Decimaldarstellung -#include -std::decimal::decimal128 - -// 1e18 < INF < Max_Value / 2 -constexpr ll INF = 0x3FFF'FFFF'FFFF'FFFFll; -// 1e9 < INF < Max_Value / 2 -constexpr int INF = 0x3FFF'FFFF; diff --git a/other/timed.cpp b/other/timed.cpp deleted file mode 100644 index b3ed4ef..0000000 --- a/other/timed.cpp +++ /dev/null @@ -1,3 +0,0 @@ -int times = clock(); -//run for 900ms -while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) {...} diff --git a/python/io.py b/python/io.py deleted file mode 100644 index aa16d4c..0000000 --- a/python/io.py +++ /dev/null @@ -1,3 +0,0 @@ -n, m = map(int, input().split()) -A = list(map(int, input().split())) -print(n, m, *A) diff --git a/python/python.tex b/python/python.tex deleted file mode 100644 index e4346bf..0000000 --- a/python/python.tex +++ /dev/null @@ -1,10 +0,0 @@ -\section{Python} -\lstset{language=Python} - -\subsection{IO} -\lstinputlisting{python/io.py} - -\subsection{Recursion} -\lstinputlisting{python/recursion.py} - -\lstset{language=C++} diff --git a/python/recursion.py b/python/recursion.py deleted file mode 100644 index 45e0147..0000000 --- a/python/recursion.py +++ /dev/null @@ -1,2 +0,0 @@ -import sys -sys.setrecursionlimit(1000_007) diff --git a/string/ahoCorasick.cpp b/string/ahoCorasick.cpp deleted file mode 100644 index eac312c..0000000 --- a/string/ahoCorasick.cpp +++ /dev/null @@ -1,52 +0,0 @@ -constexpr ll ALPHABET_SIZE = 26, OFFSET = 'a'; -struct AhoCorasick { - struct vert { - int suffix = 0, ch, cnt = 0; - array nxt = {}; - - vert(int p, int c) : suffix(-p), ch(c) {} - }; - vector aho = {{0, -1}}; - - int addString(string &s) { - int v = 0; - for (auto c : s) { - int idx = c - OFFSET; - if (!aho[v].nxt[idx]) { - aho[v].nxt[idx] = sz(aho); - aho.emplace_back(v, idx); - } - v = aho[v].nxt[idx]; - } - aho[v].cnt++; - return v; // trie node index of pattern (pattern state) - } - - int getSuffix(int v) { - if (aho[v].suffix < 0) { - aho[v].suffix = go(getSuffix(-aho[v].suffix), aho[v].ch); - } - return aho[v].suffix; - } - - int go(int v, int idx) { // Root is v=0, idx is char - OFFSET - if (aho[v].nxt[idx]) return aho[v].nxt[idx]; - else return v == 0 ? 
0 : go(getSuffix(v), idx);
- }
-
- vector<vector<int>> adj;
- vector<int> dp;
- void buildGraph() {
- adj.resize(sz(aho));
- dp.assign(sz(aho), 0);
- for (int i = 1; i < sz(aho); i++) {
- adj[getSuffix(i)].push_back(i);
- }}
-
- void dfs(int v = 0) { // dp on tree
- for (int u : adj[v]) {
- //dp[u] = dp[v] + aho[u].cnt; // pattern count
- dfs(u);
- dp[v] += dp[u]; // no of matches
- }}
-};
diff --git a/string/deBruijn.cpp b/string/deBruijn.cpp
deleted file mode 100644
index e829137..0000000
--- a/string/deBruijn.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
-string deBruijn(int n, char mi = '0', char ma = '1') {
- string res, c(1, mi);
- do {
- if (n % sz(c) == 0) res += c;
- } while(next(c, n, mi, ma));
- return res;
-}
diff --git a/string/duval.cpp b/string/duval.cpp
deleted file mode 100644
index bf36cce..0000000
--- a/string/duval.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-vector<pair<int, int>> duval(const string& s) {
- vector<pair<int, int>> res;
- for (int i = 0; i < sz(s);) {
- int j = i + 1, k = i;
- for (; j < sz(s) && s[k] <= s[j]; j++) {
- if (s[k] < s[j]) k = i;
- else k++;
- }
- while (i <= k) {
- res.push_back({i, i + j - k});
- i += j - k;
- }}
- return res;
-}
-
-int minrotation(const string& s) {
- auto parts = duval(s+s);
- for (auto [l, r] : parts) {
- if (l < sz(s) && r >= sz(s)) {
- return l;
-}}}
diff --git a/string/kmp.cpp b/string/kmp.cpp
deleted file mode 100644
index 421479e..0000000
--- a/string/kmp.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-vector<int> kmpPreprocessing(const string& sub) {
- vector<int> b(sz(sub) + 1);
- b[0] = -1;
- for (int i = 0, j = -1; i < sz(sub);) {
- while (j >= 0 && sub[i] != sub[j]) j = b[j];
- b[++i] = ++j;
- }
- return b;
-}
-vector<int> kmpSearch(const string& s, const string& sub) {
- vector<int> result, pre = kmpPreprocessing(sub);
- for (int i = 0, j = 0; i < sz(s);) {
- while (j >= 0 && s[i] != sub[j]) j = pre[j];
- i++; j++;
- if (j == sz(sub)) {
- result.push_back(i - j);
- j = pre[j];
- }}
- return result;
-}
diff --git a/string/longestCommonSubsequence.cpp b/string/longestCommonSubsequence.cpp
deleted file mode 100644
index 109fe72..0000000
--- a/string/longestCommonSubsequence.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-string lcss(const string& a, const string& b) {
- vector<vector<int>> m(sz(a) + 1, vector<int>(sz(b) + 1));
- for (int i = sz(a) - 1; i >= 0; i--) {
- for (int j = sz(b) - 1; j >= 0; j--) {
- if (a[i] == b[j]) m[i][j] = 1 + m[i+1][j+1];
- else m[i][j] = max(m[i+1][j], m[i][j+1]);
- }} // Für die Länge: return m[0][0];
- string res;
- for (int j = 0, i = 0; j < sz(b) && i < sz(a);) {
- if (a[i] == b[j]) res += a[i++], j++;
- else if (m[i][j+1] > m[i+1][j]) j++;
- else i++;
- }
- return res;
-}
diff --git a/string/lyndon.cpp b/string/lyndon.cpp
deleted file mode 100644
index 858c3db..0000000
--- a/string/lyndon.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-bool next(string& s, int n, char mi = '0', char ma = '1') {
- for (int i = sz(s), j = sz(s); i < n; i++)
- s.push_back(s[i % j]);
- while(!s.empty() && s.back() == ma) s.pop_back();
- if (s.empty()) {
- s = mi;
- return false;
- } else {
- s.back()++;
- return true;
-}}
diff --git a/string/manacher.cpp b/string/manacher.cpp
deleted file mode 100644
index 112bd55..0000000
--- a/string/manacher.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-vector<int> manacher(const string& t) {
- //transforms "aa" to ".a.a." to find even length palindromes
- string s(sz(t) * 2 + 1, '.');
- for (int i = 0; i < sz(t); i++) s[2 * i + 1] = t[i];
-
- int mid = 0, r = 0, n = sz(s);
- vector<int> pal(n);
- for (int i = 1; i < n - 1; i++) {
- if (r > i) pal[i] = min(r - i, pal[2 * mid - i]);
- while (pal[i] < min(i, n - i - 1) &&
- s[i + pal[i] + 1] == s[i - pal[i] - 1]) {
- pal[i]++;
- }
- if (i + pal[i] > r) mid = i, r = i + pal[i];
- }
-
- //convert lengths to constructed string s (optional)
- //for (int i = 0; i < n; i++) pal[i] = 2 * pal[i] + 1;
- return pal;
-}
diff --git a/string/rollingHash.cpp b/string/rollingHash.cpp
deleted file mode 100644
index 00e2273..0000000
--- a/string/rollingHash.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-// q = 29, 53, 101, 257, 1009, 65'537
-// or choose q random from [sigma, m)
-// m = 1'500'000'001, 1'600'000'009, 1'700'000'009
-template<ll Q, ll M>
-struct Hasher {
- vector<ll> power = {1}, pref = {0};
- Hasher(const string& s) {
- for (auto x : s) {
- power.push_back(power.back() * Q % M);
- pref.push_back((pref.back() * Q % M + x) % M);
- }}
-
- ll hash(int l, int r) { // Berechnet hash(s[l..r)).
- return (pref[r] - power[r-l] * pref[l] % M + M) % M;
- }
-};
diff --git a/string/rollingHash2.cpp b/string/rollingHash2.cpp
deleted file mode 100644
index f60db2e..0000000
--- a/string/rollingHash2.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// M = 1.7e9 + 9, 1e18L + 9, 2.2e18L + 7
-struct Hash {
- static constexpr ll M = 3e18L + 37;
- static constexpr ll Q = 318LL << 53; // Random in [SIGMA+1, M)
- vector<ll> pref = {0}, power = {1};
-
- Hash(const string& s) {
- for (auto c : s) {
- pref.push_back((mul(pref.back(), Q) + c + M) % M);
- power.push_back(mul(power.back(), Q));
- }}
-
- ll operator()(int l, int r) {
- return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
- }
-
- static ll mul(__int128 a, ll b) {return a * b % M;}
-};
diff --git a/string/rollingHashCf.cpp b/string/rollingHashCf.cpp
deleted file mode 100644
index b055b29..0000000
--- a/string/rollingHashCf.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-// q = 29, 53, 101, 257, 1009, 65'537
-// or choose q random from [sigma, m)
-// m = 1'500'000'001, 1'600'000'009, 1'700'000'009
-struct Hasher {
- vector<ll> power = {1}, pref = {0};
- ll m, q; char c;
- Hasher(const string& s, ll m, ll q, char c) :
- m(m), q(q), c(c) {
- for (char x : s) {
- power.push_back(power.back() * q % m);
- pref.push_back((pref.back() * q % m + (x - c)) % m);
- }}
-
- ll hash(int l, int r) { // Berechnet hash(s[l..r)).
- return (pref[r] - power[r-l] * pref[l] % m + m) % m;
- }
-};
diff --git a/string/string.tex b/string/string.tex
deleted file mode 100644
index dbea36c..0000000
--- a/string/string.tex
+++ /dev/null
@@ -1,132 +0,0 @@
-\section{Strings}
-
-\begin{algorithm}{\textsc{Knuth-Morris-Pratt}-Algorithmus}
- \begin{methods}
- \method{kmpSearch}{sucht \code{sub} in \code{s}}{\abs{s}+\abs{sub}}
- \end{methods}
- \sourcecode{string/kmp.cpp}
-\end{algorithm}
-
-\begin{algorithm}{Z-Algorithmus}
- \begin{methods}[ll]
- $z_i\coloneqq$ Längstes gemeinsames Präfix von $s_0\cdots s_{n-1}$ und $s_i\cdots s_{n-1}$ & \runtime{n}
- \end{methods}
- Suchen: Z-Algorithmus auf \code{P\$S} ausführen, Positionen mit $z_i=\abs{P}$ zurückgeben
- \sourcecode{string/z.cpp}
-\end{algorithm}
-
-\begin{algorithm}{Rolling Hash}
- \sourcecode{string/rollingHash2.cpp}
-\end{algorithm}
-
-\begin{algorithm}{Pattern Matching mit Wildcards}
- Gegeben zwei Strings $A$ und $B$, wobei $B$ $k$ \emph{wildcards} enthält.
Sei: - \begin{align*} - a_i&=\cos(\alpha_i) + i\sin(\alpha_i) &\text{ mit } \alpha_i&=\frac{2\pi A[i]}{\Sigma}\\ - b_i&=\cos(\beta_i) + i\sin(\beta_i) &\text{ mit } \beta_i&=\begin{cases*} - \frac{2\pi B[\abs{B}-i-1]}{\Sigma} & falls $B[\abs{B}-i-1]\in\Sigma$ \\ - 0 & sonst - \end{cases*} - \end{align*} - $B$ matcht $A$ an stelle $i$ wenn $(b\cdot a)[|B|-1+i]=|B|-k$. - Benutze FFT um $(b\cdot a)$ zu berechnen. -\end{algorithm} - -\begin{algorithm}{\textsc{Manacher}'s Algorithm, Longest Palindrome} - \begin{methods} - \method{init}{transformiert \code{string a}}{n} - \method{manacher}{berechnet Längen der Palindrome in longest}{n} - \end{methods} - \sourcecode{string/manacher.cpp} -\end{algorithm} - -\begin{algorithm}{Longest Common Subsequence} - \begin{methods} - \method{lcss}{findet längste gemeinsame Sequenz}{\abs{a}\*\abs{b}} - \end{methods} - \sourcecode{string/longestCommonSubsequence.cpp} -\end{algorithm} - -\columnbreak -\begin{algorithm}{\textsc{Aho-Corasick}-Automat} - \begin{methods}[ll] - sucht patterns im Text & \runtime{\abs{Text}+\sum\abs{pattern}} - \end{methods} - \begin{enumerate} - \item mit \code{addString(pattern, idx)} Patterns hinzufügen. - \item rufe \code{buildGraph()} auf - \item mit \code{state = go(state, idx)} in nächsten Zustand wechseln. - \item erhöhe dabei \code{dp[state]++} - \item rufe \code{dfs()} auf. In dp[pattern state] stehen die Anzahl der Matches - \end{enumerate} - \sourcecode{string/ahoCorasick.cpp} -\end{algorithm} -\clearpage - -\begin{algorithm}{Lyndon und De-Bruijn} - \begin{itemize} - \item \textbf{Lyndon-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen. - \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von Lyndon-Worten zerlegt werden. - \item Für Lyndon-Worte $u, v$ mit $u SA, LCP; - vector> P; - - SuffixArray(const string& s) : n(sz(s)), SA(n), LCP(n), - P(__lg(2 * n - 1) + 1, vector(n)) { - P[0].assign(all(s)); - iota(all(SA), 0); - sort(all(SA), [&](int a, int b) {return s[a] < s[b];}); - vector x(n); - for (int k = 1, c = 1; c < n; k++, c *= 2) { - iota(all(x), n - c); - for (int ptr = c; int i : SA) if (i >= c) x[ptr++] = i - c; - - vector cnt(k == 1 ? MAX_CHAR : n); - for (int i : P[k-1]) cnt[i]++; - partial_sum(all(cnt), begin(cnt)); - for (int i : x | views::reverse) SA[--cnt[P[k-1][i]]] = i; - - auto p = [&](int i) {return i < n ? 
P[k-1][i] : -1;};
- for (int i = 1; i < n; i++) {
- int a = SA[i-1], b = SA[i];
- P[k][b] = P[k][a] + (p(a) != p(b) || p(a+c) != p(b+c));
- }}
- for (int i = 1; i < n; i++) LCP[i] = lcp(SA[i-1], SA[i]);
- }
-
- int lcp(int x, int y) {//x & y are text-indices, not SA-indices
- if (x == y) return n - x;
- int res = 0;
- for (int i = sz(P) - 1; i >= 0 && max(x, y) + res < n; i--) {
- if (P[i][x + res] == P[i][y + res]) res |= 1 << i;
- }
- return res;
- }
-};
diff --git a/string/suffixAutomaton.cpp b/string/suffixAutomaton.cpp
deleted file mode 100644
index 291f760..0000000
--- a/string/suffixAutomaton.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-constexpr int ALPHABET_SIZE = 26;
-constexpr char OFFSET = 'a';
-struct SuffixAutomaton {
- struct State {
- int len, link = -1;
- array<int, ALPHABET_SIZE> next = {}; // map if large Alphabet
- State(int l) : len(l) {}
- };
-
- vector<State> st = {State(0)};
- int cur = 0;
-
- SuffixAutomaton(const string& s) {
- st.reserve(2 * sz(s));
- for (auto c : s) extend(c - OFFSET);
- }
-
- void extend(int c) {
- int p = cur;
- cur = sz(st);
- st.emplace_back(st[p].len + 1);
- for (; p != -1 && !st[p].next[c]; p = st[p].link) {
- st[p].next[c] = cur;
- }
- if (p == -1) st[cur].link = 0;
- else {
- int q = st[p].next[c];
- if (st[p].len + 1 == st[q].len) {
- st[cur].link = q;
- } else {
- st.emplace_back(st[p].len + 1);
- st.back().link = st[q].link;
- st.back().next = st[q].next;
- for (; p != -1 && st[p].next[c] == q; p = st[p].link) {
- st[p].next[c] = sz(st) - 1;
- }
- st[q].link = st[cur].link = sz(st) - 1;
- }}}
-
- vector<int> calculateTerminals() {
- vector<int> terminals;
- for (int p = cur; p != -1; p = st[p].link) {
- terminals.push_back(p);
- }
- return terminals;
- }
-
- // Pair with start index (in t) and length of LCS.
- pair<int, int> longestCommonSubstring(const string& t) {
- int v = 0, l = 0, best = 0, bestp = 0;
- for (int i = 0; i < sz(t); i++) {
- int c = t[i] - OFFSET;
- for (; v && !st[v].next[c]; v = st[v].link) l = st[v].len;
- if (st[v].next[c]) v = st[v].next[c], l++;
- if (l > best) best = l, bestp = i;
- }
- return {bestp - best + 1, best};
- }
-};
diff --git a/string/suffixTree.cpp b/string/suffixTree.cpp
deleted file mode 100644
index caeeecf..0000000
--- a/string/suffixTree.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-struct SuffixTree {
- struct Vert {
- int start, end, suf;
- map<char, int> next;
- };
- string s;
- int needsSuffix, pos, remainder, curVert, curEdge, curLen;
- // Each Vertex gives its children range as [start, end)
- vector<Vert> tree = {Vert{-1, -1, 0, {}}};
-
- SuffixTree(const string& s) : s(s) {
- needsSuffix = remainder = curVert = curEdge = curLen = 0;
- pos = -1;
- for (int i = 0; i < sz(s); i++) extend();
- }
-
- int newVert(int start, int end) {
- tree.push_back({start, end, 0, {}});
- return sz(tree) - 1;
- }
-
- void addSuffixLink(int vert) {
- if (needsSuffix) tree[needsSuffix].suf = vert;
- needsSuffix = vert;
- }
-
- bool fullImplicitEdge(int vert) {
- int len = min(tree[vert].end, pos + 1) - tree[vert].start;
- if (curLen >= len) {
- curEdge += len;
- curLen -= len;
- curVert = vert;
- return true;
- } else {
- return false;
- }}
-
- void extend() {
- pos++;
- needsSuffix = 0;
- remainder++;
- while (remainder) {
- if (curLen == 0) curEdge = pos;
- if (!tree[curVert].next.count(s[curEdge])) {
- int leaf = newVert(pos, sz(s));
- tree[curVert].next[s[curEdge]] = leaf;
- addSuffixLink(curVert);
- } else {
- int nxt = tree[curVert].next[s[curEdge]];
- if (fullImplicitEdge(nxt)) continue;
- if (s[tree[nxt].start + curLen] == s[pos]) {
- curLen++;
- addSuffixLink(curVert);
- break;
- }
- int split = newVert(tree[nxt].start, - tree[nxt].start + curLen); - tree[curVert].next[s[curEdge]] = split; - int leaf = newVert(pos, sz(s)); - tree[split].next[s[pos]] = leaf; - tree[nxt].start += curLen; - tree[split].next[s[tree[nxt].start]] = nxt; - addSuffixLink(split); - } - remainder--; - if (curVert == 0 && curLen) { - curLen--; - curEdge = pos - remainder + 1; - } else { - curVert = tree[curVert].suf ? tree[curVert].suf : 0; - }}} -}; \ No newline at end of file diff --git a/string/trie.cpp b/string/trie.cpp deleted file mode 100644 index 0544a9f..0000000 --- a/string/trie.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Zahlenwerte müssen bei 0 beginnen und zusammenhängend sein. -constexpr int ALPHABET_SIZE = 2; -struct node { - int words, wordEnds; vector children; - node() : words(0), wordEnds(0), children(ALPHABET_SIZE, -1){} -}; -vector trie = {node()}; - -int insert(vector& word) { - int id = 0; - for (int c : word) { - trie[id].words++; - if (trie[id].children[c] < 0) { - trie[id].children[c] = sz(trie); - trie.emplace_back(); - } - id = trie[id].children[c]; - } - trie[id].words++; - trie[id].wordEnds++; - return id; -} - -void erase(vector& word) { - int id = 0; - for (int c : word) { - trie[id].words--; - id = trie[id].children[c]; - if (id < 0) return; - } - trie[id].words--; - trie[id].wordEnds--; -} diff --git a/string/z.cpp b/string/z.cpp deleted file mode 100644 index 069fa38..0000000 --- a/string/z.cpp +++ /dev/null @@ -1,10 +0,0 @@ -vector Z(const string& s) { - int n = sz(s); - vector z(n); - for (int i = 1, x = 0; i < n; i++) { - z[i] = max(0, min(z[i - x], x + z[x] - i)); - while (i + z[i] < n && s[z[i]] == s[i + z[i]]) { - x = i, z[i]++; - }} - return z; -} diff --git a/tcr.pdf b/tcr.pdf index 5cc63a7..6cfe7d1 100644 Binary files a/tcr.pdf and b/tcr.pdf differ diff --git a/tcr.tex b/tcr.tex deleted file mode 100644 index b327b37..0000000 --- a/tcr.tex +++ /dev/null @@ -1,65 +0,0 @@ - -%maybe size 9pt if too many pages -\documentclass[a4paper,fontsize=7.8pt]{scrartcl} - -% General information. -\newcommand{\teamname}{Kindergarten Timelimit} -\newcommand{\university}{Karlsruhe Institute of Technology} - -% Options -\newif\ifoptional -%\optionaltrue - -% Font encoding. -\usepackage[T1]{fontenc} -\usepackage[ngerman]{babel} -\usepackage[utf8]{inputenc} -\usepackage[hidelinks,pdfencoding=auto]{hyperref} - -% Include headers. -\usepackage{latexHeaders/layout} -\usepackage{latexHeaders/math} -\usepackage{latexHeaders/code} -\usepackage{latexHeaders/commands} - -% Title and author information. -\title{Team Contest Reference} -\author{\teamname \\ \university} -\date{\today} -\begin{document} - -% Titlepage with table of contents. -\setlength{\columnsep}{1cm} -\optional{ -\maketitle -\begin{multicols*}{3} - \tableofcontents -\end{multicols*} -} - -\newpage - -% Content. 
-\begin{multicols*}{3} - \input{datastructures/datastructures} - \input{graph/graph} - \input{geometry/geometry} - \input{math/math} -\end{multicols*} - \clearpage - \input{math/tables} -\begin{multicols*}{3} - \input{string/string} - \input{python/python} - \input{other/other} - \input{template/template} - \clearpage - \ifodd\value{page} - \else - \null - \thispagestyle{empty} - \clearpage - \fi - \input{tests/test} -\end{multicols*} -\end{document} diff --git a/template/console.cpp b/template/console.cpp deleted file mode 100644 index 31885e9..0000000 --- a/template/console.cpp +++ /dev/null @@ -1,2 +0,0 @@ -alias comp="g++ -std=gnu++17 -O2 -Wall -Wextra -Wconversion -Wshadow" -alias dbg="comp -g -fsanitize=address,undefined" diff --git a/template/template.cpp b/template/template.cpp deleted file mode 100644 index 88bdd56..0000000 --- a/template/template.cpp +++ /dev/null @@ -1,19 +0,0 @@ -#include -using namespace std; - -#define tsolve int t; cin >> t; while(t--) solve -#define debug(x) cerr << __LINE__ << ": "#x" = " << (x) << endl -#define nl '\n' -#define all(x) ::begin(x), ::end(x) -#define sz(x) (ll)::size(x) - -using ll = long long; -using ld = long double; - -void solve() {} - -int main() { - cin.tie(0)->sync_with_stdio(false); - cout << setprecision(16); - solve(); -} diff --git a/template/template.tex b/template/template.tex deleted file mode 100644 index 3525ddf..0000000 --- a/template/template.tex +++ /dev/null @@ -1,9 +0,0 @@ -\section{Template} - -\begin{algorithm}{C++} - \sourcecode{template/template.cpp} -\end{algorithm} - -\begin{algorithm}{Console} - \sourcecode{template/console.cpp} -\end{algorithm} diff --git a/test/datastructures/bitset.cpp b/test/datastructures/bitset.cpp new file mode 100644 index 0000000..2ba61a5 --- /dev/null +++ b/test/datastructures/bitset.cpp @@ -0,0 +1,6 @@ +#include "../util.h" + +int main() { + int x = 0; + #include +} diff --git a/test/datastructures/fenwickTree.cpp b/test/datastructures/fenwickTree.cpp new file mode 100644 index 0000000..c1ef6bf --- /dev/null +++ b/test/datastructures/fenwickTree.cpp @@ -0,0 +1,58 @@ +#include "../util.h" +#include + +//void update(int i, ll val) +//void init(int n) +//prefix_sum(int i) + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer(10, 100); + init(n); + vector naive(n); + for (int operations = 0; operations < 1000; operations++) { + { + int i = Random::integer(0, n); + ll x = Random::integer(-1000, 1000); + update(i, x); + naive[i] += x; + } + { + queries++; + int i = Random::integer(0, n); + ll got = prefix_sum(i); + ll expected = 0; + for (int j = 0; j <= i; j++) expected += naive[j]; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + t.start(); + init(N); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + int i = Random::integer(0, N); + int j = Random::integer(0, N); + ll x = Random::integer(-1000, 1000); + + t.start(); + update(i, x); + hash ^= prefix_sum(j); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/datastructures/fenwickTree2.cpp b/test/datastructures/fenwickTree2.cpp new file mode 100644 index 0000000..aa99576 --- 
/dev/null +++ b/test/datastructures/fenwickTree2.cpp @@ -0,0 +1,60 @@ +#include "../util.h" +#include + +//void update(int l, int r, ll val) +//void init(int n) +//prefix_sum(int i) + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer(10, 100); + vector naive(n);// = Random::integers(n, -1000, 1000); + init(naive); + for (int operations = 0; operations < 1000; operations++) { + { + auto [i, j] = Random::pair(0, n); + ll x = Random::integer(-1000, 1000); + update(i, j, x); + for (int k = i; k < j; k++) naive[k] += x; + } + { + queries++; + int i = Random::integer(0, n); + ll got = prefix_sum(i); + ll expected = 0; + for (int j = 0; j <= i; j++) expected += naive[j]; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + vector tmp = Random::integers(N, -1000, 1000); + t.start(); + init(tmp); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + int i = Random::integer(0, N); + int j = Random::integer(0, N); + int k = Random::integer(0, N); + ll x = Random::integer(-1000, 1000); + + t.start(); + update(i, j, x); + hash ^= prefix_sum(k); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/datastructures/lazyPropagation.cpp b/test/datastructures/lazyPropagation.cpp new file mode 100644 index 0000000..7002061 --- /dev/null +++ b/test/datastructures/lazyPropagation.cpp @@ -0,0 +1,61 @@ +#include "../util.h" +constexpr ll inf = LL::INF; +#include + +constexpr int N = 1000'000; + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer(10, 100); + vector naive = Random::integers(n, -1000, 1000); + SegTree tree(naive); + for (int operations = 0; operations < 1000; operations++) { + { + int l = Random::integer(0, n + 1); + int r = Random::integer(0, n + 1); + //if (l > r) swap(l, r); + ll x = Random::integer(-1000, 1000); + tree.update(l, r, x); + for (int j = l; j < r; j++) naive[j] = x; + } + { + queries++; + int l = Random::integer(0, n + 1); + int r = Random::integer(0, n + 1); + //if (l > r) swap(l, r); + ll got = tree.query(l, r); + ll expected = 0; + for (int j = l; j < r; j++) expected += naive[j]; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + } + cerr << "tested random queries: " << queries << endl; +} + +void performance_test() { + timer t; + t.start(); + vector tmp(N); + SegTree tree(tmp); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + auto [l1, r1] = Random::pair(0, N + 1); + auto [l2, r2] = Random::pair(0, N + 1); + ll x1 = Random::integer(-1000, 1000); + + t.start(); + tree.update(l1, r1, x1); + hash ^= tree.query(l2, r2); + t.stop(); + } + if (t.time > 2000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/datastructures/pbds.cpp b/test/datastructures/pbds.cpp new file mode 100644 index 0000000..9080332 --- /dev/null +++ b/test/datastructures/pbds.cpp @@ -0,0 +1,11 @@ +#include "../util.h" +#include + +int main() { + Tree t1, t2; + 
swap(t1, t2); + hashSet s1, s2; + swap(s1, s2); + hashMap m1, m2; + swap(m1, m2); +} \ No newline at end of file diff --git a/test/datastructures/segmentTree.cpp b/test/datastructures/segmentTree.cpp new file mode 100644 index 0000000..fbac13e --- /dev/null +++ b/test/datastructures/segmentTree.cpp @@ -0,0 +1,122 @@ +#include "../util.h" +#include + +constexpr int N = 1'000'000; + +//void update(int i, ll val) +//ll query(int l, int r) + +//point update + range query +void stress_test1() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer(10, 100); + vector naive = Random::integers(n, -1000, 1000); + SegTree tree(naive); + for (int operations = 0; operations < 1000; operations++) { + { + int i = Random::integer(0, n); + ll x = Random::integer(-1000, 1000); + tree.update(i, x); + naive[i] = x;//point assignment + } + { + queries++; + int l = Random::integer(0, n + 1); + int r = Random::integer(0, n + 1); + //if (l > r) swap(l, r); + ll got = tree.query(l, r); + ll expected = 0; + for (int j = l; j < r; j++) expected += naive[j];//range sum + if (got != expected) cerr << " got: " << got << ", expected: " << expected << FAIL; + } + } + } + cerr << " tested random queries: " << queries << endl; +} + +//point update + range query +void performance_test1() { + timer t; + t.start(); + vector tmp(N); + SegTree tree(tmp); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + int i = Random::integer(0, N); + auto [l, r] = Random::pair(0, N + 1); + ll x = Random::integer(-1000, 1000); + + t.start(); + tree.update(i, x); + hash ^= tree.query(l, r); + t.stop(); + } + if (t.time > 1000) cerr << " too slow: " << t.time << FAIL; + cerr << " tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +//void modify(int l, int r, T val) +//ll query(int i) + +//range update + point query +void stress_test2() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer(10, 100); + vector naive(n); + SegTree tree(naive); + naive = Random::integers(n, -1000, 1000); + copy(all(naive), tree.tree.begin() + n); + for (int operations = 0; operations < 1000; operations++) { + { + int l = Random::integer(0, n + 1); + int r = Random::integer(0, n + 1); + //if (l > r) swap(l, r); + ll x = Random::integer(-1000, 1000); + tree.modify(l, r, x); + for (int j = l; j < r; j++) naive[j] += x;//range add + } + { + queries++; + int i = Random::integer(0, n); + ll got = tree.query(i); + ll expected = naive[i];//point query + if (got != expected) cerr << " got: " << got << ", expected: " << expected << FAIL; + } + } + } + cerr << " tested random queries: " << queries << endl; +} + +//range update + point query +void performance_test2() { + timer t; + t.start(); + vector tmp(N); + SegTree tree(tmp); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + int i = Random::integer(0, N); + auto [l, r] = Random::pair(0, N + 1); + ll x = Random::integer(-1000, 1000); + + t.start(); + tree.modify(l, r, x); + hash ^= tree.query(i); + t.stop(); + } + if (t.time > 1000) cerr << " too slow: " << t.time << FAIL; + cerr << " tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + cerr << "point update + range query:" << endl; + stress_test1(); + performance_test1(); + cerr << "range update + point query" << endl; + stress_test2(); + performance_test2(); +} diff --git a/test/datastructures/sparseTable.cpp b/test/datastructures/sparseTable.cpp new 
file mode 100644 index 0000000..7577694 --- /dev/null +++ b/test/datastructures/sparseTable.cpp @@ -0,0 +1,51 @@ +#include "../util.h" +constexpr ll INF = LL::INF; +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 1000; tries++) { + int n = Random::integer(1, 100); + vector naive = Random::integers(n, -1000, 1000); + SparseTable st; + st.init(&naive); + for (int operations = 0; operations < 1000; operations++) { + queries++; + int l = Random::integer(0, n+1); + int r = Random::integer(0, n+1); + + ll got = st.queryIdempotent(l, r); + ll expected = r <= l ? -1 : l; + for (int j = l; j < r; j++) { + if (naive[j] < naive[expected]) expected = j; + } + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'500'000; +void performance_test() { + timer t; + vector naive = Random::integers(N, -1000, 1000); + t.start(); + SparseTable st; + st.init(&naive); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + auto [l, r] = Random::pair(0, N+1); + + t.start(); + hash += st.queryIdempotent(l, r); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/datastructures/sparseTableDisjoint.cpp b/test/datastructures/sparseTableDisjoint.cpp new file mode 100644 index 0000000..77bb005 --- /dev/null +++ b/test/datastructures/sparseTableDisjoint.cpp @@ -0,0 +1,48 @@ +#include "../util.h" +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 1000; tries++) { + int n = Random::integer(1, 100); + vector naive = Random::integers(n, -1000, 1000); + DisjointST st; + st.init(&naive); + for (int operations = 0; operations < 1000; operations++) { + queries++; + int l = Random::integer(0, n+1); + int r = Random::integer(0, n+1); + + ll got = st.query(l, r); + ll expected = 0; + for (int j = l; j < r; j++) expected += naive[j]; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'250'000; +void performance_test() { + timer t; + vector naive = Random::integers(N, -1000, 1000); + t.start(); + DisjointST st; + st.init(&naive); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + auto [l, r] = Random::pair(0, N+1); + + t.start(); + hash += st.query(l, r); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/datastructures/stlHashMap.cpp b/test/datastructures/stlHashMap.cpp new file mode 100644 index 0000000..77976fd --- /dev/null +++ b/test/datastructures/stlHashMap.cpp @@ -0,0 +1,4 @@ +#include "../util.h" +#include + +int main() {} \ No newline at end of file diff --git a/test/datastructures/stlTree.cpp b/test/datastructures/stlTree.cpp new file mode 100644 index 0000000..7bacbee --- /dev/null +++ b/test/datastructures/stlTree.cpp @@ -0,0 +1,2 @@ +#include "../util.h" +#include diff --git a/test/datastructures/unionFind.cpp b/test/datastructures/unionFind.cpp new file mode 100644 index 0000000..2afdc86 --- /dev/null +++ b/test/datastructures/unionFind.cpp @@ -0,0 +1,109 @@ +#include 
"../util.h" +struct UF { + UF(int n) {init(n);} + #include +}; + +struct Naive { + vector> adj; + vector seen; + int counter; + Naive(int n) : adj(n), seen(n), counter(0) {} + + template + void dfs(int x, F&& f) { + counter++; + vector todo = {x}; + seen[x] = counter; + while (!todo.empty()) { + x = todo.back(); + todo.pop_back(); + f(x); + for (ll y : adj[x]) { + if (seen[y] != counter) { + seen[y] = counter; + todo.push_back(y); + } + } + } + } + + int findSet(int a) { + int res = a; + dfs(a, [&](int x){res = min(res, x);}); + return res; + } + + void unionSets(int a, int b) { + adj[a].push_back(b); + adj[b].push_back(a); + } + + int size(int a) { + int res = 0; + dfs(a, [&](int /**/){res++;}); + return res; + } +}; + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200; tries++) { + int n = Random::integer(1, 100); + UF uf(n); + Naive naive(n); + for (int i = 0; i < n; i++) { + for (int j = 0; j < 10; j++) { + int a = Random::integer(0, n); + int b = Random::integer(0, n); + uf.unionSets(a, b); + naive.unionSets(a, b); + } + UF tmp = uf; + for (int j = 0; j < n; j++) { + { + auto got = tmp.size(j); + auto expected = naive.size(j); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + { + int a = Random::integer(0, n); + int b = Random::integer(0, n); + bool got = tmp.findSet(a) == tmp.findSet(b); + bool expected = naive.findSet(a) == naive.findSet(b); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + queries += n; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 2'000'000; +void performance_test() { + timer t; + t.start(); + UF uf(N); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + int i = Random::integer(0, N); + int j = Random::integer(0, N); + int k = Random::integer(0, N); + int l = Random::integer(0, N); + + t.start(); + uf.unionSets(i, j); + hash += uf.size(k); + hash += uf.size(l); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/datastructures/waveletTree.cpp b/test/datastructures/waveletTree.cpp new file mode 100644 index 0000000..d294835 --- /dev/null +++ b/test/datastructures/waveletTree.cpp @@ -0,0 +1,75 @@ +#include "../util.h" +#include + +constexpr int N = 1000'000; + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100; tries++) { + int n = Random::integer(10, 100); + vector naive = Random::integers(n, -1000, 1000); + WaveletTree tree(naive); + for (int operations = 0; operations < 1000; operations++) { + { + queries++; + int l = Random::integer(0, n + 1); + int r = Random::integer(0, n + 1); + //if (l > r) swap(l, r); + int x = Random::integer(-1, n); + ll got = tree.kth(l, r, x); + ll expected = -1; + if (x >= 0 && l + x < r) { + vector tmp(naive.begin() + l, naive.begin() + r); + std::sort(all(tmp)); + expected = tmp[x]; + } + if (got != expected) { + cerr << "kth, got: " << got << ", expected: " << expected << FAIL; + } + } + { + queries++; + int l = Random::integer(0, n + 1); + int r = Random::integer(0, n + 1); + //if (l > r) swap(l, r); + ll x = Random::integer(-1000, 1000); + ll got = tree.countSmaller(l, r, x); + ll expected = 0; + for (int j = l; j < r; j++) { + if (naive[j] < x) expected++; + } + if (got != expected) { + cerr << "count, got: " << got << ", expected: " << 
expected << FAIL; + } + } + } + } + cerr << "tested random queries: " << queries << endl; +} + +void performance_test() { + timer t; + vector tmp = Random::integers(N, -1000, 1000); + t.start(); + WaveletTree tree(tmp); + t.stop(); + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + auto [l1, r1] = Random::pair(0, N + 1); + auto [l2, r2] = Random::pair(0, N + 1); + int x1 = Random::integer(l1, r1 + 1); + ll x2 = Random::integer(-1000, 1000); + + t.start(); + hash ^= tree.kth(l1, r1, x1); + hash ^= tree.countSmaller(l2, r2, x2); + t.stop(); + } + if (t.time > 2000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/geometry.h b/test/geometry.h new file mode 100644 index 0000000..7886fe2 --- /dev/null +++ b/test/geometry.h @@ -0,0 +1,140 @@ +#include + +namespace details { + // Liegt p auf der Strecke a-b? + bool pointInLineSegment(pt a, pt b, pt p) { + if (cross(a, b, p) != 0) return false; + auto dist = norm(a - b); + return norm(a - p) < dist && norm(b - p) < dist; + } + + // Test auf Streckenschnitt zwischen a-b und c-d. + // (nur intern) + bool lineSegmentIntersection(pt a, pt b, pt c, pt d) { + if (cross(a, b, c) == 0 && cross(a, b, d) == 0) { + return pointInLineSegment(a,b,c) || + pointInLineSegment(a,b,d) || + pointInLineSegment(c,d,a) || + pointInLineSegment(c,d,b); + } + return ccw(a, b, c) * ccw(a, b, d) < 0 && + ccw(c, d, a) * ccw(c, d, b) < 0; + } +} + +namespace Random { + vector partition(ll n, std::size_t k){//min = 0; + n += k; + vector res = Random::distinct(k-1, 1, n); + sort(all(res)); + res.emplace_back(n); + ll last = 0; + for (std::size_t i = 0; i < k; i++) { + res[i] -= last; + last += res[i]; + res[i]--; + } + return res; + } + + vector convex(int n, ll dim) { + binomial_distribution binomial(n - 2, 0.5); + + while (true) { + int left = 1 + binomial(Random::rng); + int down = 1 + binomial(Random::rng); + auto x = Random::partition(2 * dim - 2, left); + auto y = Random::partition(2 * dim - 2, down); + for (auto& z : x) z = -z; + for (auto& z : y) z = -z; + for (auto z : Random::partition(2 * dim - 2, n - left)) x.push_back(z); + for (auto z : Random::partition(2 * dim - 2, n - down)) y.push_back(z); + auto itX = std::partition(x.begin(), x.end(), [](ll z){return z == 0;}); + auto itY = std::partition(y.begin(), y.end(), [](ll z){return z != 0;}); + if (distance(x.begin(), itX) + distance(itY, y.end()) > n) continue; + shuffle(itX, x.end(), Random::rng); + if (itX != x.begin()) shuffle(y.begin(), itY, Random::rng); + + vector dirs(n); + for (size_t i = 0; i < dirs.size(); i++) { + dirs[i] = pt(x[i], y[i]); + } + sortAround(0, dirs); + + vector res = {{0, 0}}; + ll maxX = 0; + ll maxY = 0; + for (auto dir : dirs) { + pt tmp(real(res.back()) + real(dir), + imag(res.back()) + imag(dir)); + maxX = std::max(maxX, real(tmp)); + maxY = std::max(maxY, imag(tmp)); + res.emplace_back(tmp); + } + res.pop_back(); + for (auto& point : res) { + point = pt(real(point) + dim - 1 - maxX, + imag(point) + dim - 1 - maxY); + } + bool strict = true; + for (int i = 0; i < n; i++) strict &= cross(res[i], res[(i + 1) % n], res[(i + 2) % n]) != 0; + if (strict) return res; + } + } + + vector polygon(int n, ll dim) { + while (true) { + vector ps = points(n, -dim, dim); + bool coolinear = false; + for (int i = 0; i < n; i++) { + for (int j = 0; j < i; j++) { + for (int k = 0; k < j; k++) { + coolinear |= cross(ps[i], 
ps[j], ps[k]) == 0; + } + } + } + if (coolinear) continue; + + bool changed; + do { + changed = false; + for (int i = 0; i < n && !changed; i++) { + for (int j = i + 1; j < n && !changed; j++) { + if (details::lineSegmentIntersection(ps[i], ps[(i+1) % n], ps[j], ps[(j+1) % n])) { + reverse(ps.begin() + i + 1, ps.begin() + j + 1); + changed = true; + } + } + } + } while (changed); + return ps; + } + } + + pt integerPoint(ll range) { + return pt(integer(-range, range), + integer(-range, range)); + } + + vector integerPoints(std::size_t n, ll range) { + vector res(n); + for (auto& p : res) p = integerPoint(range); + return res; + } + + array line(ll range) { + pt a = integerPoint(range); + pt b = a; + while (b == a) b = integerPoint(range); + return {a, b}; + } + + array triangle(ll range) { + pt a = integerPoint(range); + pt b = a; + while (b == a) b = integerPoint(range); + pt c = a; + while (ccw(a, b, c) == 0) c = integerPoint(range); + return {a, b, c}; + } +} \ No newline at end of file diff --git a/test/geometry/antipodalPoints.cpp b/test/geometry/antipodalPoints.cpp new file mode 100644 index 0000000..d20dfb6 --- /dev/null +++ b/test/geometry/antipodalPoints.cpp @@ -0,0 +1,70 @@ +#include "../util.h" +constexpr ll EPS = 0; +#define double ll +#define polar polar +#include +#undef polar +#undef double +#include +#include "../geometry.h" + +vector> naive(vector ps) { + ll n = sz(ps); + auto test = [&](int i, int j){ + if (dot(ps[j] - ps[i], ps[i - 1] - ps[i]) <= 0) return false; + if (dot(ps[j] - ps[i], ps[i + 1] - ps[i]) <= 0) return false; + return true; + }; + ps.push_back(ps[0]); + ps.push_back(ps[1]); + vector> res; + for (ll i = 1; i <= n; i++) { + for (ll j = 1; j < i; j++) { + if (test(i, j) && test(j, i)) res.emplace_back(i % n, j % n); + } + } + return res; +} + +void stress_test(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto ps = Random::convex(n, range); + + auto got = antipodalPoints(ps); + for (auto& [a, b] : got) if (a > b) swap(a, b); + sort(all(got)); + + auto expected = naive(ps); + for (auto& [a, b] : expected) if (a > b) swap(a, b); + + for (auto x : expected) { + auto it = lower_bound(all(got), x); + if (it == got.end() || *it != x) cerr << "error" << FAIL; + } + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 99'000; +void performance_test() { + timer t; + + auto ps = Random::convex(N, 1'000'000'000); + + t.start(); + auto got = antipodalPoints(ps); + t.stop(); + + hash_t hash = sz(got); + if (t.time > 50) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(100); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/circle.cpp b/test/geometry/circle.cpp new file mode 100644 index 0000000..3d3d27d --- /dev/null +++ b/test/geometry/circle.cpp @@ -0,0 +1,116 @@ +#include "../util.h" +constexpr double EPS = 1e-6; +#define ll double +double gcd(double x, double /**/) {return x;} //hacky +#include +#undef ll +#include + +// Entfernung von Punkt p zur Geraden durch a-b. 
2d und 3d +double distToLine(pt a, pt b, pt p) { + return abs(cross(p - a, b - a)) / abs(b - a); +} + +pt randomIntegerPT(ll range) { + return pt(Random::integer(-range, range), Random::integer(-range, range)); +} + +ll sq(ll x) { + return x*x; +} + +int expectedCount(ll x1, ll y1, ll r1, ll x2, ll y2, ll r2) { + if (x1 == x2 && y1 == y2){ + return r1 == r2 ? -1 : 0; + } else { + ll d = sq(x1 - x2) + sq(y1 - y2); + + if (d > sq(r1 + r2) || d < sq(r1 - r2)) { + return 0; + } else if (d == sq(r1 + r2) || d == sq(r1 - r2)) { + return 1; + } else{ + return 2; + } + } +} + +void test_circleIntersection(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto c1 = randomIntegerPT(range); + auto c2 = c1; + while (c1 == c2) c2 = randomIntegerPT(range); + double r1 = Random::integer(1, range); + double r2 = Random::integer(1, range); + + auto got = circleIntersection(c1, r1, c2, r2); + + if (sz(got) != expectedCount(real(c1), imag(c1), r1, real(c2), imag(c2), r2)) cerr << "error: wrong count" << FAIL; + + for (int i = 0; i < sz(got); i++) { + for (int j = 0; j < i; j++) { + if (abs(got[i] - got[j]) < 1e-6) cerr << "error: identical" << FAIL; + } + } + + for (auto p : got) { + if (float_error(abs(c1 - p), r1) > 1e-6) cerr << "error: 1" << FAIL; + if (float_error(abs(c2 - p), r2) > 1e-6) cerr << "error: 2" << FAIL; + } + queries += sz(got); + } + cerr << "tested circleIntersection: " << queries << endl; +} + +void test_circleRayIntersection(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto c = randomIntegerPT(range); + double r = Random::integer(1, range); + + pt orig = randomIntegerPT(range); + pt dir = 0; + while (abs(dir) < 0.5) dir = randomIntegerPT(range); + + auto got = circleRayIntersection(c, r, orig, dir); + + double dist = distToLine(orig, orig + dir, c); + int lineIntersections = 0; + if (dist <= r) lineIntersections = 2; + if (abs(dist - r) < 1e-9) lineIntersections = 1; + + int expected = 0; + if (abs(orig - c) < r) expected = 1; //starts inside + if (abs(orig - c) > r) { //starts outside + if (dot(dir, c - orig) >= 0) expected = lineIntersections; + else expected = 0; + } + if (abs(abs(orig - c) - r) < 1e-9) { //starts on circle + if (dot(dir, c - orig) >= 0) expected = lineIntersections; + else expected = 1; + } + + if (sz(got) != expected) cerr << "error: wrong count" << FAIL; + + for (int i = 0; i < sz(got); i++) { + for (int j = 0; j < i; j++) { + if (abs(got[i] - got[j]) < 1e-6) cerr << "error: identical" << FAIL; + } + } + + for (auto p : got) { + if (float_error(abs(c - p), r) > 1e-6) cerr << "error: 1" << FAIL; + if (distToLine(orig, orig + dir, p) > 1e-6) cerr << "error: 2" << FAIL; + } + queries += sz(got); + } + cerr << "tested circleIntersection: " << queries << endl; +} + +int main() { + test_circleIntersection(10); + test_circleIntersection(100); + test_circleRayIntersection(10); + test_circleRayIntersection(100); +} diff --git a/test/geometry/closestPair.cpp b/test/geometry/closestPair.cpp new file mode 100644 index 0000000..5959b21 --- /dev/null +++ b/test/geometry/closestPair.cpp @@ -0,0 +1,69 @@ +#include "../util.h" +constexpr ll EPS = 0; +#define double ll +#define polar polar +#include +#undef polar +#undef double +constexpr ll INF = LL::INF; +ll sq(ll x) {return x*x;} +ll isqrt(ll x) {return (ll)sqrtl(x);} +#include + +//strict convex hull +ll naive(const vector& ps) { + ll opt = LL::INF; + for (ll i = 0; i < sz(ps); i++) { + for (ll j = 0; j < i; j++) { + opt = min(opt, norm(ps[i] - ps[j])); 
+ } + } + return opt; +} + +void stress_test(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 100); + auto ps = Random::points(n, -range, range); + auto got = shortestDist(ps); + auto expected = naive(ps); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + double maxTime = 0; + + vector ps; + for (int i = 0; i*i <= N; i++) { + for (int j = 0; j*j <= N; j++) { + ps.emplace_back(i, j); + } + } + t.start(); + hash = shortestDist(ps); + t.stop(); + maxTime = max(maxTime, t.time); + + ps = Random::points(N, -1'000'000'000, 1'000'000'000); + t.reset(); + t.start(); + hash += shortestDist(ps); + t.stop(); + maxTime = max(maxTime, t.time); + + if (maxTime > 500) cerr << "too slow: " << maxTime << FAIL; + cerr << "tested performance: " << maxTime << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(100); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/closestPair.double.cpp b/test/geometry/closestPair.double.cpp new file mode 100644 index 0000000..2f8a1ab --- /dev/null +++ b/test/geometry/closestPair.double.cpp @@ -0,0 +1,66 @@ +#include "../util.h" +constexpr double EPS = 1e-9; +#define ll double +double gcd(double x, double /**/) {return x;} //hacky +#include +constexpr ll INF = LL::INF; +#include +#undef ll + +//strict convex hull +double naive(const vector& ps) { + double opt = LL::INF; + for (ll i = 0; i < sz(ps); i++) { + for (ll j = 0; j < i; j++) { + opt = min(opt, norm(ps[i] - ps[j])); + } + } + return opt; +} + +void stress_test(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 100); + auto ps = Random::points(n, -range, range); + auto got = shortestDist(ps); + auto expected = naive(ps); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + double maxTime = 0; + + vector ps; + for (int i = 0; i*i <= N; i++) { + for (int j = 0; j*j <= N; j++) { + ps.emplace_back(i, j); + } + } + t.start(); + hash = shortestDist(ps); + t.stop(); + maxTime = max(maxTime, t.time); + + ps = Random::points(N, -1'000'000'000, 1'000'000'000); + t.reset(); + t.start(); + hash += shortestDist(ps); + t.stop(); + maxTime = max(maxTime, t.time); + + if (maxTime > 500) cerr << "too slow: " << maxTime << FAIL; + cerr << "tested performance: " << maxTime << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(100); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/convexHull.cpp b/test/geometry/convexHull.cpp new file mode 100644 index 0000000..788a634 --- /dev/null +++ b/test/geometry/convexHull.cpp @@ -0,0 +1,79 @@ +#include "../util.h" +constexpr ll EPS = 0; +#define double ll +#define polar polar +#include +#undef polar +#undef double +#include + +//strict convex hull +ll isConvexHull(const vector& ps, const vector& hull) { + ll n = sz(hull) - 1; + if (n == 0) { + for (pt p : ps) if (p != hull[0]) return 1; + return 0; + } else { + if (hull[0] != hull[n]) return 2; + //hull has no duplicates + for (ll i = 0; i < n; i++) { + for (ll j = 0; j < i; j++) { + if (hull[i] == hull[j]) return 3; + } + } + 
//hull is subset + for (pt p : hull) { + bool isP = false; + for (pt c : ps) isP |= c == p; + if (!isP) return 4; + } + //hull contains all points + for (pt p : hull) { + ll mi = 1; + for (ll i = 0; i < n; i++) { + mi = min(mi, cross(hull[i], hull[i + 1], p)); + } + if (mi < 0) return 5; //outside + if (mi > 0) continue; + bool isCorner = 4; + for (pt c : hull) isCorner |= c == p; + if (!isCorner) return 6; + } + // hull is convex + if (n <= 2) return 0; + for (ll i = 0; i < n; i++) { + if (cross(hull[i], hull[i + 1], hull[(i + 2) % n]) <= 0) return 7; + } + return 0; + } +} + +void stress_test(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(1, 100); + auto ps = Random::points(n, -range, range); + auto got = convexHull(ps); + if (isConvexHull(ps, got) > 0) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 2'000'000; +void performance_test() { + timer t; + auto ps = Random::points(N, -1'000'000'000, 1'000'000'000); + t.start(); + auto a = convexHull(ps); + t.stop(); + hash_t hash = sz(a); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(100); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/delaunay.cpp b/test/geometry/delaunay.cpp new file mode 100644 index 0000000..7f8ec30 --- /dev/null +++ b/test/geometry/delaunay.cpp @@ -0,0 +1,144 @@ +#include "../util.h" +using pt = complex; +// Kreuzprodukt, 0, falls kollinear. +auto cross(pt a, pt b) {return imag(conj(a) * b);} +auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);} +#pragma GCC diagnostic ignored "-Wunused-variable" +#include + +vector convexHull(vector pts){ + sort(all(pts), [](const pt& a, const pt& b){ + return real(a) == real(b) ? imag(a) < imag(b) + : real(a) < real(b); + }); + pts.erase(unique(all(pts)), pts.end()); + int k = 0; + vector h(2 * sz(pts)); + auto half = [&](auto begin, auto end, int t) { + for (auto it = begin; it != end; it++) { + while (k > t && cross(h[k-2], h[k-1], *it) < 0) k--;//allow collinear points! + h[k++] = *it; + }}; + half(all(pts), 1);// Untere Hülle. + half(next(pts.rbegin()), pts.rend(), k);// Obere Hülle. + h.resize(k); + return h; +} + +lll area(const vector& poly) { //poly[0] == poly.back() + lll res = 0; + for (int i = 0; i + 1 < sz(poly); i++) + res += cross(poly[i], poly[i + 1]); + return res; +} + +// Liegt p auf der Strecke a-b? +bool pointInLineSegment(pt a, pt b, pt p) { + if (cross(a, b, p) != 0) return false; + auto dist = norm(a - b); + return norm(a - p) < dist && norm(b - p) < dist; +} + +// Test auf Streckenschnitt zwischen a-b und c-d. 
+// (nur intern) +bool lineSegmentIntersection(pt a, pt b, pt c, pt d) { + if (cross(a, b, c) == 0 && cross(a, b, d) == 0) { + return pointInLineSegment(a,b,c) || + pointInLineSegment(a,b,d) || + pointInLineSegment(c,d,a) || + pointInLineSegment(c,d,b); + } + return cross(a, b, c) * cross(a, b, d) < 0 && + cross(c, d, a) * cross(c, d, b) < 0; +} + +// 1 => c links von a->b +// 0 => a, b und c kolliniear +// -1 => c rechts von a->b +int ccw(pt a, pt b, pt c) { + auto orien = cross(b - a, c - a); + return (orien > 0) - (orien < 0); +} + +bool inOutCirc(pt a, pt b, pt c, pt p) { + lll p2 = norm(p); + lll A = norm(a)-p2; + lll B = norm(b)-p2; + lll C = norm(c)-p2; + return ccw(a, b, c) * (cross(p, a, b)*C + cross(p, b, c)*A + cross(p, c, a)*B) > 0; +} + + +void stress_test(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto ps = Random::points(n, -range, range); + bool skip = true; + for (int i = 2; i < n; i++) skip &= cross(ps[i-2], ps[i-1], ps[i]) == 0; + if (skip) continue; + for (int i = 0; i < n; i++) { + for (int j = 0; j < i; j++) { + skip |= ps[i] == ps[j]; + } + } + if (skip) continue; + + auto hull = convexHull(ps); + lll expectedArea = area(hull); + hull.pop_back(); + + auto got = delaunay(ps); + if (sz(got) % 3 != 0) cerr << "error: not triangles" << FAIL; + if (sz(got) / 3 + sz(hull) - 3 + 1 != 2 * sz(ps) - 4) cerr << "error: wrong number" << FAIL; + + //all triangles should be oriented ccw + lll gotArea = 0; + for (int i = 0; i < sz(got); i += 3) gotArea += cross(got[i], got[i+1], got[i+2]); + if (gotArea != expectedArea) cerr << "error: wrong area" << FAIL; + + for (int i = 0; i < sz(got); i++) { + int ii = i + 1; + if (i / 3 != ii / 3) ii -= 3; + for (int j = 0; j < i; j++) { + int jj = j + 1; + if (j / 3 != jj / 3) jj -= 3; + + if (got[i] == got[j] && got[ii] == got[jj]) cerr << "error: dublicate" << FAIL; + if (lineSegmentIntersection(got[i], got[ii], got[j], got[jj])) cerr << "error: intersection" << FAIL; + } + bool seen = false; + for (pt p : ps) seen |= p == got[i]; + if (!seen) cerr << "error: invalid point" << FAIL; + } + for (int i = 0; i < sz(got); i += 3) { + for (pt p : ps) { + if (p == got[i]) continue; + if (p == got[i+1]) continue; + if (p == got[i+2]) continue; + if (inOutCirc(got[i], got[i+1], got[i+2], p)) cerr << "error: not delaunay" << FAIL; + } + } + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 100'000; +void performance_test() { + timer t; + auto ps = Random::points(N, -1'000'000'000, 1'000'000'000); + t.start(); + auto got = delaunay(ps); + t.stop(); + hash_t hash = sz(got); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(10); + stress_test(10'000); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/formulas.cpp b/test/geometry/formulas.cpp new file mode 100644 index 0000000..d63d431 --- /dev/null +++ b/test/geometry/formulas.cpp @@ -0,0 +1,127 @@ +#include "../util.h" +constexpr ll EPS = 0; +#define double ll +#define polar polar +#include +#undef polar +#undef double + +void test_dot(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto p = Random::point(-range, range); + auto q = Random::point(-range, range); + + ll expected = real(p) * real(q) + imag(p) * imag(q); + ll got = dot(p, q); + + if (got != expected) cerr << "got: " << got << ", 
expected: " << expected << FAIL; + queries++; + } + cerr << "tested dot: " << queries << endl; +} + +void test_norm(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto p = Random::point(-range, range); + + ll expected = real(p) * real(p) + imag(p) * imag(p); + ll got = norm(p); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested norm: " << queries << endl; +} + +void test_cross(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto p = Random::point(-range, range); + auto q = Random::point(-range, range); + + ll expected = real(p) * imag(q) - imag(p) * real(q); + ll got = cross(p, q); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested cross1: " << queries << endl; + + queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto a = Random::point(-range, range); + auto b = Random::point(-range, range); + auto c = Random::point(-range, range); + + ll expected = cross(b - a, c - a); + ll got = cross(a, b, c); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested cross2: " << queries << endl; +} + +void test_ccw(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto a = Random::point(-range, range); + auto b = Random::point(-range, range); + auto c = Random::point(-range, range); + + ll expected = cross(a, b, c); + if (expected < 0) expected = -1; + if (expected > 0) expected = 1; + ll got = ccw(a, b, c); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested ccw: " << queries << endl; +} + +void test_isCoplanar(ll range) {(void) range;}// cant check this... 
+ +void test_uniqueAngle(ll range) { + auto lessPt = [](pt a, pt b){ + if (real(a) != real(b)) return real(a) < real(b); + return imag(a) < imag(b); + }; + map seen(lessPt); + + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + pt expected = Random::point(-sqrt(range), sqrt(range)); + ll g = abs(gcd(real(expected), imag(expected))); + if (g == 0) continue; + expected /= g; + + pt rot = Random::point(-sqrt(range), sqrt(range)); + if (norm(rot) == 0) continue; + + pt got = uniqueAngle(expected * rot, pt(Random::integer(1, sqrt(range)), 0) * rot); + auto it = seen.emplace(got, expected).first; + + if (it->second != expected) cerr << "error: inconsistent" << FAIL; + queries++; + } + cerr << "tested uniqueAngle: " << queries << " (" << sz(seen) << ")" << endl; +} + +int main() { + test_dot(100); + test_dot(1'000'000'000); + test_norm(100); + test_norm(1'000'000'000); + test_cross(100); + test_cross(1'000'000'000); + test_ccw(100); + test_ccw(1'000'000'000); + test_isCoplanar(100); + test_isCoplanar(1'000'000'000); + test_uniqueAngle(100); + test_uniqueAngle(10'000); + test_uniqueAngle(1'000'000'000); +} diff --git a/test/geometry/linesAndSegments.cpp b/test/geometry/linesAndSegments.cpp new file mode 100644 index 0000000..2943a67 --- /dev/null +++ b/test/geometry/linesAndSegments.cpp @@ -0,0 +1,240 @@ +#include "../util.h" +constexpr double EPS = 1e-9; +#define ll double +double gcd(double x, double /**/) {return x;} //hacky +#include +#undef ll +#include + +#include "../geometry.h" + +void stress_pointOnLine(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + pt p = Random::integerPoint(range); + + bool expected = ccw(a, b, p) == 0; + bool got = pointOnLine(a, b, p); + + if (got != expected) cerr << "error" << FAIL; + queries++; + } + cerr << "tested pointOnLine: " << queries << endl; +} + +void stress_lineIntersection(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + auto [c, d] = Random::line(range); + if (ccw(a, b, c) == 0 && ccw(a, b, d) == 0) continue; + + bool expected = ccw(0, a-b, c-d) == 0; + bool got = lineIntersection(a, b, c, d); + + if (got != expected) cerr << "error" << FAIL; + queries++; + } + cerr << "tested lineIntersection: " << queries << endl; +} + +void stress_lineIntersection2(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + auto [c, d] = Random::line(range); + if (ccw(0, a-b, c-d) == 0) continue; + + auto got = lineIntersection2(a, b, c, d); + + if (distToLine(a, b, got) > 1e-6) cerr << "error: 1" << FAIL; + if (distToLine(a, b, got) > 1e-6) cerr << "error: 2" << FAIL; + queries++; + } + cerr << "tested lineIntersection2: " << queries << endl; +} + +void stress_distToLine(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + pt p = Random::integerPoint(range); + + auto got = distToLine(a, b, p); + auto expected = abs(p - projectToLine(a, b, p)); + + if (float_error(got, expected) > 1e-6) cerr << "error" << FAIL; + + queries++; + } + cerr << "tested distToLine: " << queries << endl; +} + +void stress_projectToLine(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + pt p = Random::integerPoint(range); + + auto got = projectToLine(a, b, p); + + if (distToLine(a, b, got) > 1e-6) cerr << "error: 1" << FAIL; + if 
(dot((b-a)/abs(b-a), (got-p)/abs(got-p)) > 1e-6) cerr << "error: 2" << FAIL; + + queries++; + } + cerr << "tested projectToLine: " << queries << endl; +} + +void stress_sortLine(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + pt dir = 0; + while (norm(dir) == 0) dir = Random::integerPoint(range); + int n = Random::integer(1, 30); + vector ps = Random::integerPoints(n, range); + + sortLine(dir, ps); + + for (int i = 1; i < n; i++) { + if (dot(dir, ps[i-1]) > dot(dir, ps[i])) cerr << "error" << FAIL; + } + queries+=n; + } + cerr << "tested sortLine: " << queries << endl; +} + +void stress_pointOnSegment(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + pt p = Random::integerPoint(range); + + bool expected = pointOnLine(a, b, p) && abs(a-p) <= abs(a-b) && abs(b-p) <= abs(a-b); + bool got = pointOnSegment(a, b, p); + + if (got != expected) cerr << "error" << FAIL; + queries++; + } + cerr << "tested pointOnSegment: " << queries << endl; +} + +void stress_distToSegment(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + pt p = Random::integerPoint(range); + + double expected = min(abs(p-a), abs(p-b)); + if (dot(b-a,p-a) >= 0 && dot(a-b,p-b) >= 0) expected = min(expected, distToLine(a, b, p)); + double got = distToSegment(a, b, p); + + if (float_error(got, expected) > 1e-6) cerr << "error" << FAIL; + queries++; + } + cerr << "tested distToSegment: " << queries << endl; +} + +void stress_segmentIntersection(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + auto [c, d] = Random::line(range); + + bool expected; + if (ccw(a, b, c) == 0 && ccw(a, b, d) == 0) { + expected = pointOnSegment(a,b,c) || + pointOnSegment(a,b,d) || + pointOnSegment(c,d,a) || + pointOnSegment(c,d,b); + } else { + expected = ccw(a, b, c) * ccw(a, b, d) <= 0 && + ccw(c, d, a) * ccw(c, d, b) <= 0; + } + bool got = segmentIntersection(a, b, c, d); + + if (got != expected) cerr << "error" << FAIL; + queries++; + } + cerr << "tested segmentIntersection: " << queries << endl; +} + +void stress_segmentIntersection2(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + auto [c, d] = Random::line(range); + + auto got = segmentIntersection2(a, b, c, d); + auto tmp = segmentIntersection(a, b, c, d); + + if (!got.empty() != tmp) cerr << "error: 1" << FAIL; + for (pt p : got) { + if (distToSegment(a, b, p) > 1e-6) cerr << "error: 2" << FAIL; + if (distToSegment(a, b, p) > 1e-6) cerr << "error: 3" << FAIL; + } + if (tmp) { + double gotDist = abs(got.front() - got.back()); + double expectedDist = 0; + array tmp2 = {a, b, c, d}; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < i; j++) { + if (!pointOnSegment(a, b, tmp2[i])) continue; + if (!pointOnSegment(c, d, tmp2[i])) continue; + if (!pointOnSegment(a, b, tmp2[j])) continue; + if (!pointOnSegment(c, d, tmp2[j])) continue; + expectedDist = max(expectedDist, abs(tmp2[i] - tmp2[j])); + } + } + if (float_error(gotDist, expectedDist) > 1e-6) cerr << "error: 4" << FAIL; + } + queries++; + } + cerr << "tested segmentIntersection2: " << queries << endl; +} + +void stress_distBetweenSegments(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + auto [a, b] = Random::line(range); + auto [c, d] = Random::line(range); + + double expected = 0; + if (!segmentIntersection(a, 
b, c, d)) { + expected = min({distToSegment(a, b, c), distToSegment(a, b, d), + distToSegment(c, d, a), distToSegment(c, d, b)}); + } + double got = distBetweenSegments(a, b, c, d); + + if (float_error(got, expected) > 1e-6) cerr << "error" << FAIL; + queries++; + } + cerr << "tested distBetweenSegments: " << queries << endl; +} + +int main() { + stress_pointOnLine(100); + stress_pointOnLine(10'000); + stress_pointOnLine(1'000'000'000); + stress_lineIntersection(100); + stress_lineIntersection(1'000'000'000); + stress_lineIntersection2(100); + stress_lineIntersection2(1'000'000); + stress_distToLine(100); + stress_distToLine(1'000'000'000); + stress_projectToLine(100); + stress_projectToLine(1'000'000); + stress_sortLine(100); + stress_sortLine(1'000'000'000); + stress_pointOnSegment(100); + stress_pointOnSegment(1'000'000'000); + stress_distToSegment(100); + stress_distToSegment(1'000'000'000); + stress_segmentIntersection(100); + stress_segmentIntersection(1'000'000'000); + stress_segmentIntersection2(100); + stress_segmentIntersection2(1'000'000'000); + stress_distBetweenSegments(100); + stress_distBetweenSegments(1'000'000'000); +} diff --git a/test/geometry/polygon.cpp b/test/geometry/polygon.cpp new file mode 100644 index 0000000..1dd46ca --- /dev/null +++ b/test/geometry/polygon.cpp @@ -0,0 +1,296 @@ +#include "../util.h" +constexpr ll EPS = 0; +constexpr double INF = LD::INF; +#define double ll +#define polar polar +#include +#undef polar +#undef double +double abs(pt p) { + return hypot(real(p), imag(p)); +} +// Liegt p auf der Strecke a-b? +bool pointOnLineSegment(pt a, pt b, pt p) { + if (cross(a, b, p) != 0) return false; + auto dist = norm(a - b); + return norm(a - p) <= dist && norm(b - p) <= dist; +} +// Entfernung von Punkt p zur Strecke a-b. 
+double distToSegment(pt a, pt b, pt p) { + if (a == b) return abs(p - a); + if (dot(p - a, b - a) <= 0) return abs(p - a); + if (dot(p - b, b - a) >= 0) return abs(p - b); + return abs(cross(p - a, b - a)) / abs(b - a); +} +#pragma GCC diagnostic ignored "-Wunused-variable" +#include +#include "../geometry.h" + +void test_area(ll range) { + int queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto ps = Random::polygon(n, range); + ps.push_back(ps[0]); + + ll expected = 0; + for (int i = 0; i < n; i++) { + expected += cross(0, ps[i], ps[i+1]); + } + double got = area(ps) * 2; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested area: " << queries << endl; +} + +bool ptLess(pt a, pt b) { + if (real(a) != real(b)) return real(a) < real(b); + return imag(a) < imag(b); +} + +void test_windingNumber(ll range) { + int queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 8); + auto ps = Random::polygon(n, range); + ps.push_back(ps[0]); + + for (int i = 0; i < 100; i++) { + auto p = Random::point(-range, range); + + ll expected = 0; + bool onBorder = false; + for (int j = 0; j < n; j++) { + int cur = details::lineSegmentIntersection(p, p + pt(1, 2'000'000'007), ps[j], ps[j+1]); + if (ptLess(ps[j], ps[j+1])) expected -= cur; + else expected += cur; + onBorder |= pointOnLineSegment(ps[j], ps[j+1], p); + } + if (onBorder) continue; + if (area(ps) < 0) expected = -expected; + + bool got = windingNumber(p, ps); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + queries += n; + } + cerr << "tested windingNumber: " << queries << endl; +} + +void test_inside(ll range) { + int queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto ps = Random::polygon(n, range); + ps.push_back(ps[0]); + + for (int i = 0; i < 100; i++) { + auto p = Random::point(-range, range); + + ll count = 0; + bool onBorder = false; + for (int j = 0; j < n; j++) { + count += details::lineSegmentIntersection(p, p + pt(1, 2'000'000'007), ps[j], ps[j+1]); + onBorder |= pointOnLineSegment(ps[j], ps[j+1], p); + } + bool expected = (count % 2) && !onBorder; + bool got = inside(p, ps); + + if (got != expected) cerr << "error" << FAIL; + } + queries += n; + } + cerr << "tested inside: " << queries << endl; +} + +void test_insideConvex(ll range) { + int queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto ps = Random::convex(n, range); + + for (int i = 0; i < 100; i++) { + auto p = Random::point(-range, range); + + bool expected = true; + for (int j = 0; j < n; j++) expected &= cross(p, ps[j], ps[(j+1) % n]) > 0; + + bool got = insideConvex(p, ps); + + if (got != expected) { + for (pt pp : ps) cerr << pp << " "; + cerr << endl; + cerr << p << endl; + } + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + queries += n; + } + cerr << "tested insideConvex: " << queries << endl; +} + +// convex hull without duplicates, h[0] != h.back() +// apply comments if border counts as inside +bool insideOrOnConvex(pt p, const vector& hull) { + int l = 0, r = sz(hull) - 1; + if (cross(hull[0], hull[r], p) > 0) return false; + while (l + 1 < r) { + int m = (l + r) / 2; + if (cross(hull[0], hull[m], p) >= 0) l = m; + else r = m; + } + return cross(hull[l], hull[r], p) >= 0; +} + +void test_minkowski(ll range) { + int queries = 
0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto A = Random::convex(n, range); + int m = Random::integer(3, 30); + auto B = Random::convex(n, range); + + auto got = minkowski(A, B); + bool convex = true; + for (int i = 0; i < sz(got); i++) convex &= cross(got[i], got[(i+1) % sz(got)], got[(i+2) % sz(got)]) >= 0; + if (!convex) cerr << "error: not convex" << FAIL; + + for (pt a : A) { + for (pt b : B) { + if (!insideOrOnConvex(a + b, got)) cerr << "error: not sum" << FAIL; + } + } + queries += n + m; + } + cerr << "tested minkowski: " << queries << endl; +} + +double naive_dist(const vector& ps, const vector& qs) { + //check if intersect + double res = LD::INF; + bool intersect = true; + for (int i = 0; i < sz(qs); i++) { + bool sep = true; + for (pt p : ps) { + res = min(res, distToSegment(qs[i], qs[(i+1) % sz(qs)], p)); + sep &= cross(qs[i], qs[(i+1) % sz(qs)], p) <= 0; + } + if (sep) intersect = false; + } + for (int i = 0; i < sz(ps); i++) { + bool sep = true; + for (pt q : qs) { + res = min(res, distToSegment(ps[i], ps[(i+1) % sz(ps)], q)); + sep &= cross(ps[i], ps[(i+1) % sz(ps)], q) <= 0; + } + if (sep) intersect = false; + } + if (intersect) return 0; + return res; +} + +void test_dist(ll range) { + int queries = 0; + int pos = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 10); + auto A = Random::convex(n, range / 3); + int m = Random::integer(3, 10); + auto B = Random::convex(n, range / 3); + + pt offset = Random::point(range / 3, 2 * range / 3); + for (pt& p : B) p += offset; + + auto got = dist(A, B); + auto expected = naive_dist(A, B); + + if (float_error(got, expected) > 1e-6) cerr << "got: " << got << ", expected: " << expected << FAIL; + if (got > 0) pos++; + + queries += n + m; + } + cerr << "tested dist: " << queries << " (" << pos << ")" << endl; +} + +void test_extremal(ll range) { + int queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 30); + auto ps = Random::convex(n, range); + ps.push_back(ps[0]); + + for (int i = 0; i < 100; i++) { + auto dir = Random::point(-range, range); + int tmp = extremal(ps, dir); + if (tmp < 0 || tmp >= n) cerr << "error: out of range" << FAIL; + + auto got = ps[tmp]; + bool extremal = true; + for (pt p : ps) extremal &= dot(dir, p) <= dot(dir, got); + + if (!extremal) cerr << "error: not extremal" << FAIL; + queries += n; + } + } + cerr << "tested extremal: " << queries << endl; +} + +void test_intersect(ll range) { + int queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(3, 10); + auto ps = Random::convex(n, range); + ps.push_back(ps[0]); + + for (int i = 0; i < 100; i++) { + pt a = Random::point(-range, range); + pt b = a; + while (b == a) b = Random::point(-range, range); + + auto got = intersectLine(ps, a, b); + + vector expected; + for (int j = 0; j < n; j++) { + if (cross(ps[j], a, b) > 0 && cross(ps[j+1], a, b) < 0) expected.push_back(j); + if (cross(ps[j], a, b) < 0 && cross(ps[j+1], a, b) > 0) expected.push_back(j); + if (cross(ps[j], a, b) == 0) { + if (cross(ps[j+1], a, b) != 0 || + cross(ps[(j+n-1) % n], a, b) != 0) { + expected.push_back(j); + } + } + } + if (sz(expected) > 1 && expected[0] == expected[1]) expected.pop_back(); + + sort(all(got)); + sort(all(expected)); + + if (got != expected) cerr << "error" << FAIL; + + queries += n; + } + } + cerr << "tested intersect: " << queries << endl; +} + +int main() { + test_area(100); + test_area(1'000'000'000); + 
test_windingNumber(100); + test_windingNumber(1'000'000'000); + test_inside(100); + test_inside(1'000'000'000); + test_insideConvex(100); + test_insideConvex(1'000'000'000); + test_minkowski(100); + test_minkowski(500'000'000); + test_dist(100); + test_dist(1'000'000'000); + test_extremal(100); + test_extremal(1'000'000'000); + test_intersect(100); + test_intersect(1'000'000'000); +} diff --git a/test/geometry/segmentIntersection.cpp b/test/geometry/segmentIntersection.cpp new file mode 100644 index 0000000..9862be5 --- /dev/null +++ b/test/geometry/segmentIntersection.cpp @@ -0,0 +1,88 @@ +#include "../util.h" +constexpr ll EPS = 0; +#define double ll +#define polar polar +#include +#undef polar +#undef double + +// Liegt p auf der Strecke a-b? +bool pointOnLineSegment(pt a, pt b, pt p) { + if (cross(a, b, p) != 0) return false; + double dist = norm(a - b); + return norm(a - p) <= dist && norm(b - p) <= dist; +} + +// Test auf Streckenschnitt zwischen a-b und c-d. +bool lineSegmentIntersection(pt a, pt b, pt c, pt d) { + if (ccw(a, b, c) == 0 && ccw(a, b, d) == 0) + return pointOnLineSegment(a,b,c) || + pointOnLineSegment(a,b,d) || + pointOnLineSegment(c,d,a) || + pointOnLineSegment(c,d,b); + return ccw(a, b, c) * ccw(a, b, d) <= 0 && + ccw(c, d, a) * ccw(c, d, b) <= 0; +} + +#include + +vector randomSegs(int n, ll range) { + auto ps = Random::points(n, -range, range); + vector segs(n); + for (int i = 0; i < n; i++) { + pt b; + do { + b = Random::point(-pow(range, 0.8), pow(range, 0.8)); + } while(norm(b) == 0); + segs[i] = {ps[i], ps[i] + b, i}; + } + return segs; +} + +bool naive(vector& segs) { + for (ll i = 0; i < sz(segs); i++) { + for (ll j = 0; j < i; j++) { + if (lineSegmentIntersection(segs[i].a, segs[i].b, segs[j].a, segs[j].b)) return true; + } + } + return false; +} + +void stress_test(ll range) { + ll queries = 0; + ll intersection = 0; + ll notIntersection = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 100); + auto segs = randomSegs(n, range); + auto [a, b] = intersect(segs); + bool got = a >= 0; + if (got != (b >= 0)) cerr << "error: invalid ans" << FAIL; + auto expected = naive(segs); + if (got != expected) cerr << "error: intersection not found" << FAIL; + if (got && !lineSegmentIntersection(segs[a].a, segs[a].b, segs[b].a, segs[b].b)) cerr << "error: no intersection" << FAIL; + queries += n; + intersection += got; + notIntersection += !got; + } + cerr << "tested random queries: " << queries << "(" << intersection << ":" << notIntersection << ")" << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + auto segs = randomSegs(N, 1'000'000'000); + + t.start(); + hash_t hash = intersect(segs).first; + t.stop(); + + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(100); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/sortAround.cpp b/test/geometry/sortAround.cpp new file mode 100644 index 0000000..a27edc8 --- /dev/null +++ b/test/geometry/sortAround.cpp @@ -0,0 +1,83 @@ +#include "../util.h" +constexpr ll EPS = 0; +#define double ll +#define polar polar +#include +#undef polar +#undef double +#include + +//expected order: +//1 8 7 +//2 . 
6 +//3 4 5 +void test_tiny() { + vector expected = { + {-1, 1}, + {-1, 0}, + {-1,-1}, + { 0,-1}, + { 1,-1}, + { 1, 0}, + { 1, 1}, + { 0, 1}, + }; + auto got = expected; + for (int i = 0; i < 100'000; i++) { + shuffle(all(got), Random::rng); + sortAround(0, got); + if (got != expected) cerr << "error" << FAIL; + } + cerr << "tested tiny" << endl; +} + +void stress_test(ll range) { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 100); + auto ps = Random::points(n, -range, range); + + auto contains = [&](pt p){ + for (pt pp : ps) if (pp == p) return true; + return false; + }; + + pt c; + do { + c = Random::point(-range, range); + } while (contains(c)); + + sortAround(c, ps); + + auto isLeft = [&](pt p){return real(p - c) < 0 || (real(p - c) == 0 && imag(p - c) < 0);}; + auto isCCW = [&](pt a, pt b){return cross(c, a, b) > 0;}; + if (!is_partitioned(all(ps), isLeft)) cerr << "error 1" << FAIL; + auto mid = partition_point(all(ps), isLeft); + if (!is_sorted(ps.begin(), mid, isCCW)) cerr << "error 2" << FAIL; + if (!is_sorted(mid, ps.end(), isCCW)) cerr << "error 3" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 2'000'000; +void performance_test() { + timer t; + auto ps = Random::points(N, -1'000'000'000, 1'000'000'000); + + t.start(); + sortAround(0, ps); + t.stop(); + + hash_t hash = 0; + for (pt p : ps) hash += real(p) * imag(p); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + test_tiny(); + stress_test(100); + stress_test(1'000'000'000); + performance_test(); +} diff --git a/test/geometry/triangle.cpp b/test/geometry/triangle.cpp new file mode 100644 index 0000000..dc620ee --- /dev/null +++ b/test/geometry/triangle.cpp @@ -0,0 +1,146 @@ +#include "../util.h" +constexpr double EPS = 1e-6; +#define ll double +double gcd(double x, double /**/) {return x;} //hacky +#include +#undef ll +ll sgn(double x) { + return (x > EPS) - (x < -EPS); +} +#include +#include "../geometry.h" + +// Entfernung von Punkt p zur Geraden durch a-b. 
2d und 3d +double distToLine(pt a, pt b, pt p) { + return abs(cross(p - a, b - a)) / abs(b - a); +} + +void test_centroid(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto [a, b, c] = Random::triangle(range); + + pt center = centroid(a, b, c); + + if (distToLine(2.0*a, c+b, 2.0*center) > 1e-6) cerr << "error: 1" << FAIL; + if (distToLine(2.0*b, c+a, 2.0*center) > 1e-6) cerr << "error: 2" << FAIL; + if (distToLine(2.0*c, a+b, 2.0*center) > 1e-6) cerr << "error: 3" << FAIL; + queries++; + } + cerr << "tested centroid: " << queries << endl; +} + +void test_area(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto [a, b, c] = Random::triangle(range); + + auto gotA = 2*area(a, b, c); + auto gotB = 2*area(abs(a-b), abs(b-c), abs(c-a)); + auto expected = llround(gotA); + + if (float_error(gotA, expected) > 1e-6) cerr << "error: 1" << FAIL; + if (float_error(gotB, expected) > 1e-3) cerr << "error: 2" << FAIL; + queries++; + } + cerr << "tested area: " << queries << endl; +} + +void test_inCenter(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto [a, b, c] = Random::triangle(range); + + pt center = inCenter(a, b, c); + + double da = distToLine(a, b, center); + double db = distToLine(b, c, center); + double dc = distToLine(c, a, center); + + double avg = (da + db + dc) / 3.0; + + if (float_error(da, avg) > 1e-6) cerr << "error: 1" << FAIL; + if (float_error(db, avg) > 1e-6) cerr << "error: 2" << FAIL; + if (float_error(dc, avg) > 1e-6) cerr << "error: 3" << FAIL; + queries++; + } + cerr << "tested inCenter: " << queries << endl; +} + +void test_circumCenter(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto [a, b, c] = Random::triangle(range); + + pt center = circumCenter(a, b, c); + + double da = abs(center - a); + double db = abs(center - b); + double dc = abs(center - c); + + double avg = (da + db + dc) / 3.0; + + if (float_error(da, avg) > 1e-6) cerr << "error: 1" << FAIL; + if (float_error(db, avg) > 1e-6) cerr << "error: 2" << FAIL; + if (float_error(dc, avg) > 1e-6) cerr << "error: 3" << FAIL; + queries++; + } + cerr << "tested circumCenter: " << queries << endl; +} + +void test_insideOutCenter(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto [a, b, c] = Random::triangle(range); + pt p = Random::integerPoint(range); + + pt center = circumCenter(a, b, c); + + double da = abs(center - a); + double db = abs(center - b); + double dc = abs(center - c); + double dp = abs(center - p); + + double avg = (da + db + dc) / 3.0; + + int expected = dp < avg ? 
1 : -1; + if (float_error(dp, avg) < 1e-9) expected = 0; + + if (insideOutCenter(a, b, c, p) != expected) cerr << "error" << FAIL; + + queries++; + } + cerr << "tested insideOutCenter: " << queries << endl; +} + +void test_similar(ll range) { + int queries = 0; + for (int tries = 0; tries < 1'000'000; tries++) { + auto [a, b, c] = Random::triangle(sqrt(range)); + pt rot = Random::integerPoint(sqrt(range)); + pt add = Random::integerPoint(range); + + pt d = rot * a + add; + pt e = rot * b + add; + pt f = rot * c + add; + + if (!similar(a, b, c, d, e, f)) cerr << "error" << FAIL; + queries++; + } + cerr << "tested similar: " << queries << endl; +} + +int main() { + test_centroid(100); + test_centroid(1'000'000'000); + test_area(100); + test_area(1'000'000'000); + test_inCenter(100); + test_inCenter(1'000'000'000); + test_circumCenter(100); + test_circumCenter(1'000'000'000); + test_insideOutCenter(100); + test_insideOutCenter(1'000'000'000); + test_similar(100); + test_similar(1'000'000'000); +} diff --git a/test/graph/2sat.cpp b/test/graph/2sat.cpp new file mode 100644 index 0000000..fc3186e --- /dev/null +++ b/test/graph/2sat.cpp @@ -0,0 +1,133 @@ +#include "../util.h" +#include +#define static vector> adj; static // hacky... +#include +#undef static +#undef adj + +struct RandomClause { + int a, b; + int type; + RandomClause(int n) : + a(Random::integer(0, 2*n)), + b(Random::integer(0, 2*n)), + type(Random::integer(0, 8)) {} + + bool eval(vector& sol) const { + bool ba = sol[a]; + bool bb = sol[b]; + if (type == 0) return !ba || bb; + if (type == 1) return ba == bb; + if (type == 2) return ba || bb; + if (type == 3) return ba != bb; + if (type == 4) return ba && bb; + if (type == 5) return !(ba && bb); + + if (type == 6) return ba; + if (type == 7) return !ba; + return false; + } + + void add(sat2& sat) const { + int va = a; + int vb = b; + if (type == 0) sat.addImpl(va, vb); + if (type == 1) sat.addEquiv(va, vb); + if (type == 2) sat.addOr(va, vb); + if (type == 3) sat.addXor(va, vb); + if (type == 4) sat.addAnd(va, vb); + if (type == 5) sat.addNand(va, vb); + + if (type == 6) sat.addTrue(va); + if (type == 7) sat.addFalse(va); + } + + friend ostream& operator<<(ostream& os, const RandomClause& c) { + if (c.a & 1) os << "-"; + os << (c.a >> 1); + if (c.type == 0) os << "=>"; + if (c.type == 1) os << "=="; + if (c.type == 2) os << "or"; + if (c.type == 3) os << "xor"; + if (c.type == 4) os << "and"; + if (c.type == 5) os << "nand"; + + if (c.type == 6) return os; + if (c.type == 7) return os << "==F"; + + if (c.b & 1) os << "-"; + os << (c.b >> 1); + return os; + } +}; + +bool naive(int n, const vector& clauses) { + for (ll i = 0; i < (1ll << n); i++) { + vector tmp(2*n); + for (ll j = 0; j < n; j++) { + tmp[(2*j) + ((i >> j) & 1)] = 1; + } + bool ok = true; + for (auto& c : clauses) ok &= c.eval(tmp); + if (ok) return true; + } + return false; +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 12); + int m = Random::integer(0, 30); + + vector clauses; + for (int i = 0; i < m; i++) clauses.emplace_back(n); + + sat2 sat(n); + for (auto& c : clauses) c.add(sat); + adj = sat.adj; + + bool got = sat.solve(); + bool expected = naive(n, clauses); + + if (got) { + for (int i = 0; i < 2*n; i+=2) { + if (sat.sol[i] < 0) cerr << "error: invalid vars" << FAIL; + if (sat.sol[i+1] < 0) cerr << "error: invalid vars" << FAIL; + if (sat.sol[i] == sat.sol[i+1]) cerr << "error: inconsistent vars" << FAIL; + } + for (auto& c : clauses) 
{ + if (!c.eval(sat.sol)) { + cerr << "error: inconsistent" << FAIL; + } + } + } + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 200'000; +constexpr int M = 500'000; +void performance_test() { + timer t; + vector clauses; + for (int i = 0; i < M; i++) clauses.emplace_back(N); + t.start(); + sat2 sat(N); + for (auto& c : clauses) c.add(sat); + t.stop(); + adj = sat.adj; + t.start(); + hash_t hash = sat.solve(); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/LCA_sparse.cpp b/test/graph/LCA_sparse.cpp new file mode 100644 index 0000000..f6eb345 --- /dev/null +++ b/test/graph/LCA_sparse.cpp @@ -0,0 +1,63 @@ +#include "../util.h" +#include +#include +namespace expected { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(2, 30); + Graph g(n); + g.tree(); + + vector> adj(n); + g.forEdges([&](int a, int b){ + adj[a].push_back(b); + adj[b].push_back(a); + }); + + LCA lca; + lca.init(adj, 0); + + expected::adj = adj; + expected::init(); + + for (int i = 0; i < n; i++) { + for (int j = 0; j <= i; j++) { + auto got = lca.getLCA(i, j); + auto expected = expected::get_lca(i, j); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + Graph g(N); + g.tree(); + vector> adj(N); + g.forEdges([&](int a, int b){ + adj[a].push_back(b); + adj[b].push_back(a); + }); + + hash_t hash = 0; + t.start(); + LCA lca; + lca.init(adj, 0); + for (int i = 1; i < N; i++) hash += lca.getLCA(i-1, i); + t.stop(); + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/TSP.cpp b/test/graph/TSP.cpp new file mode 100644 index 0000000..f9aab2e --- /dev/null +++ b/test/graph/TSP.cpp @@ -0,0 +1,67 @@ +#include "../util.h" +struct edge { + ll dist; + int to; +}; +constexpr ll INF = LL::INF; +#include + +vector naive() { + int n = sz(dist); + vector todo(n - 1); + iota(all(todo), 1); + vector res; + ll best = LL::INF; + do { + int last = 0; + ll cur = 0; + for (int x : todo) { + cur += dist[last][x]; + last = x; + } + cur += dist[last][0]; + if (cur < best) { + best = cur; + res = todo; + res.insert(res.begin(), 0); + res.push_back(0); + } + } while (next_permutation(all(todo))); + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 100'000; i++) { + int n = Random::integer(1, 9); + dist.assign(n, {}); + for (auto& v : dist) v = Random::integers(n, 0, 1000'000'000); + + auto expected = naive(); + auto got = TSP(); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 19; +void performance_test() { + timer t; + dist.assign(N, {}); + for (auto& v : dist) v = Random::integers(N, 0, 1000'000'000); + t.start(); + auto got = TSP(); + t.stop(); + + hash_t hash = 0; + for (int x : got) hash += x; + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr 
<< "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/articulationPoints.bcc.cpp b/test/graph/articulationPoints.bcc.cpp new file mode 100644 index 0000000..15f5cf2 --- /dev/null +++ b/test/graph/articulationPoints.bcc.cpp @@ -0,0 +1,78 @@ +#include "../util.h" +struct edge { + ll from, to, id; +}; +#define Edge edge +#include +#undef Edge +#include + +vector> naiveBCC(int m) { + init(m); + + vector seen(sz(adj), -1); + int run = 0; + for (int i = 0; i < sz(adj); i++) { + for (auto e : adj[i]) { + run++; + seen[i] = run; + vector todo = {e.to}; + seen[e.to] = run; + while (!todo.empty()) { + int c = todo.back(); + todo.pop_back(); + for (auto ee : adj[c]) { + if (seen[ee.to] == run) continue; + seen[ee.to] = run; + todo.push_back(ee.to); + } + } + for (auto ee : adj[i]) { + if (seen[ee.to] == run) unionSets(ee.id, e.id); + } + } + } + vector> res(m); + for (int i = 0; i < m; i++) { + res[findSet(i)].push_back(i); + } + for (auto& v : res) sort(all(v)); + res.erase(remove_if(all(res), [](const vector& v){return sz(v) <= 1;}), res.end()); + sort(all(res)); + return res; +} + +void stress_test_bcc() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, min(300, n*(n-1) / 2 + 1))); + Graph g(n); + g.erdosRenyi(m); + + adj.assign(n, {}); + int nextId = 0; + g.forEdges([&](int a, int b){ + adj[a].push_back({a, b, nextId}); + adj[b].push_back({b, a, nextId}); + nextId++; + }); + + auto expected = naiveBCC(nextId); + find(); + vector> got(sz(bcc)); + for (int i = 0; i < sz(bcc); i++) { + for (auto e : bcc[i]) got[i].push_back(e.id); + sort(all(got[i])); + } + sort(all(got)); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +int main() { + stress_test_bcc(); +} diff --git a/test/graph/articulationPoints.bridges.cpp b/test/graph/articulationPoints.bridges.cpp new file mode 100644 index 0000000..a1b89d2 --- /dev/null +++ b/test/graph/articulationPoints.bridges.cpp @@ -0,0 +1,64 @@ +#include "../util.h" +struct edge { + ll from, to, id; +}; +#define Edge edge +#include +#undef Edge + +vector naiveBridges(const vector>& edges) { + vector res(sz(edges)); + + vector seen(sz(adj), -1); + for (int i = 0; i < sz(edges); i++) { + auto [a, b] = edges[i]; + vector todo = {a}; + seen[a] = i; + while (!todo.empty() && seen[b] != i) { + int c = todo.back(); + todo.pop_back(); + for (auto e : adj[c]) { + if (e.id == i) continue; + if (seen[e.to] == i) continue; + seen[e.to] = i; + todo.push_back(e.to); + } + } + res[i] = seen[b] != i; + } + return res; +} + +void stress_test_bridges() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, min(300, n*(n-1) / 2 + 1))); + Graph g(n); + g.erdosRenyi(m); + + adj.assign(n, {}); + vector> edges; + g.forEdges([&](int a, int b){ + adj[a].push_back({a, b, sz(edges)}); + adj[b].push_back({b, a, sz(edges)}); + edges.emplace_back(a, b); + }); + + auto expected = naiveBridges(edges); + find(); + vector got(sz(edges)); + for (auto e : bridges) { + if (got[e.id]) cerr << "error: duclicate" << FAIL; + got[e.id] = true; + } + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +int main() { + stress_test_bridges(); +} diff --git 
a/test/graph/articulationPoints.cpp b/test/graph/articulationPoints.cpp new file mode 100644 index 0000000..2567a09 --- /dev/null +++ b/test/graph/articulationPoints.cpp @@ -0,0 +1,85 @@ +#include "../util.h" +struct edge { + ll from, to, id; +}; +#define Edge edge +#include +#undef Edge + +vector naiveArt() { + vector res(sz(adj)); + + vector seen(sz(adj), -1); + for (int i = 0; i < sz(adj); i++) { + if (adj[i].empty()) continue; + seen[i] = i; + vector todo = {adj[i][0].to}; + seen[todo[0]] = i; + while (!todo.empty()) { + int c = todo.back(); + todo.pop_back(); + for (auto e : adj[c]) { + if (seen[e.to] == i) continue; + seen[e.to] = i; + todo.push_back(e.to); + } + } + for (auto e : adj[i]) { + if (seen[e.to] != i) res[i] = true; + } + } + return res; +} + +void stress_test_art() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, min(300, n*(n-1) / 2 + 1))); + Graph g(n); + g.erdosRenyi(m); + + adj.assign(n, {}); + int nextId = 0; + g.forEdges([&](int a, int b){ + adj[a].push_back({a, b, nextId}); + adj[b].push_back({b, a, nextId}); + nextId++; + }); + + auto expected = naiveArt(); + find(); + vector got = isArt; + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 500'000; +constexpr int M = 2'000'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + adj.assign(N, {}); + int nextId = 0; + g.forEdges([&](int a, int b){ + adj[a].push_back({a, b, nextId}); + adj[b].push_back({b, a, nextId}); + nextId++; + }); + + t.start(); + find(); + t.stop(); + hash_t hash = sz(bridges) + sz(bcc); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test_art(); + performance_test(); +} diff --git a/test/graph/bellmannFord.cpp b/test/graph/bellmannFord.cpp new file mode 100644 index 0000000..92f1fef --- /dev/null +++ b/test/graph/bellmannFord.cpp @@ -0,0 +1,70 @@ +#include "../util.h" +constexpr ll INF = LL::INF; +struct edge { + int from, to; + ll cost; +}; +#include +namespace floydWarshall { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(n-1, max(n, min(500, n*(n-1) / 2 + 1))); + vector potential = Random::integers(n, 0, 1'000'000'000'000ll); + + vector edges; + floydWarshall::dist.assign(n, vector(n, INF)); + for (int i = 0; i < n; i++) floydWarshall::dist[i][i] = 0; + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + ll w = Random::integer(1, 100'000'000'000ll); + w = potential[b] + w - potential[a]; + edges.push_back({a, b, w}); + floydWarshall::dist[a][b] = min(floydWarshall::dist[a][b], w); + }); + + floydWarshall::floydWarshall(); + for (int i = 0; i < n; i++) { + auto got = bellmannFord(n, edges, i); + auto expected = floydWarshall::dist[i]; + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 5'000; +constexpr int M = 20'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + vector edges; + g.forEdges([&](int a, int b){ + ll w1 = Random::integer(1, 1'000'000'000'000ll); + ll w2 = Random::integer(1, 1'000'000'000'000ll); + edges.push_back({a, b, w1}); + edges.push_back({b, a, w2}); + }); + + t.start(); + auto got = 
bellmannFord(N, edges, 0); + t.stop(); + hash_t hash = 0; + for (auto x : got) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/bitonicTSP.cpp b/test/graph/bitonicTSP.cpp new file mode 100644 index 0000000..7c448a2 --- /dev/null +++ b/test/graph/bitonicTSP.cpp @@ -0,0 +1,49 @@ +#include "../util.h" +namespace got { +#include +} +namespace expected { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + + vector> dist(n); + for (auto& v : dist) v = Random::reals(n, 0, 1e18); + + got::dist = dist; + expected::dist = dist; + + auto got = got::bitonicTSP(); + auto expected = got::bitonicTSP(); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +//this is an easy graph... +constexpr int N = 5'000; +void performance_test() { + timer t; + got::dist = vector>(N); + for (auto& v : got::dist) v = Random::reals(N, 0, 1e18); + + + t.start(); + auto got = got::bitonicTSP(); + t.stop(); + hash_t hash = 0; + for (auto x : got) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/bitonicTSPsimple.cpp b/test/graph/bitonicTSPsimple.cpp new file mode 100644 index 0000000..c79a0ef --- /dev/null +++ b/test/graph/bitonicTSPsimple.cpp @@ -0,0 +1,49 @@ +#include "../util.h" +namespace got { +#include +} +namespace expected { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + + vector> dist(n); + for (auto& v : dist) v = Random::reals(n, 0, 1e18); + + got::dist = dist; + expected::dist = dist; + + auto got = got::bitonicTSP(); + auto expected = got::bitonicTSP(); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +//this is an easy graph... 
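+// (random dense weights; the bitonic-tour DP does a fixed O(n^2) amount of work per
+// instance, so this mostly measures constant factors rather than hard cases)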
+constexpr int N = 2'000; +void performance_test() { + timer t; + got::dist = vector>(N); + for (auto& v : got::dist) v = Random::reals(N, 0, 1e18); + + + t.start(); + auto got = got::bitonicTSP(); + t.stop(); + hash_t hash = 0; + for (auto x : got) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/blossom.cpp b/test/graph/blossom.cpp new file mode 100644 index 0000000..714b029 --- /dev/null +++ b/test/graph/blossom.cpp @@ -0,0 +1,76 @@ +#include "../util.h" +namespace tutte { +void gauss(int n, ll mod); +#include +#include +#include +} +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 5'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, n*(n-1) / 2 + 1)); + + GM blossom(n); + srand(Random::rng()); + tutte::adj.assign(n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + tutte::adj[a].push_back(b); + tutte::adj[b].push_back(a); + + blossom.adj[a].push_back(b); + blossom.adj[b].push_back(a); + }); + + ll got = blossom.match(); + ll expected = tutte::max_matching(); + + vector seen(n); + ll got2 = 0; + for (int i = 0; i < n; i++) { + int j = blossom.pairs[i]; + if (j >= n) continue; + if (blossom.pairs[j] != i) cerr << "error: inconsitent" << FAIL; + if (j == i) cerr << "error: invalid" << FAIL; + if (j < i) continue; + if (seen[i] || seen[j]) cerr << "error: invalid" << FAIL; + seen[i] = seen[j] = true; + got2++; + } + + if (got != got2) cerr << "got: " << got << ", got2: " << got2 << FAIL; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +//this is an easy graph... 
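+// (random sparse graphs rarely force long augmenting paths or deep blossom
+// contractions, so this is far from a worst-case instance for general matching)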
+constexpr int N = 100'000; +constexpr int M = 500'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + GM blossom(N); + g.forEdges([&](int a, int b){ + blossom.adj[a].push_back(b); + blossom.adj[b].push_back(a); + }); + + t.start(); + hash_t hash = blossom.match(); + t.stop(); + if (t.time > 200) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/bronKerbosch.cpp b/test/graph/bronKerbosch.cpp new file mode 100644 index 0000000..1ccd493 --- /dev/null +++ b/test/graph/bronKerbosch.cpp @@ -0,0 +1,73 @@ +#include "../util.h" +#include + +vector naiveCliques; + +void naive(bits mask = {}, int l = 0) { + bool maximal = true; + for (ll i = 0; i < l; i++) { + if (mask[i]) continue; + if ((adj[i] & mask) == mask) maximal = false; + } + for (; l < sz(adj); l++) { + if ((adj[l] & mask) == mask) { + maximal = false; + mask[l] = 1; + naive(mask, l + 1); + mask[l] = 0; + } + } + if (maximal and mask.any()) naiveCliques.push_back(mask); +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(2, 15); + int m = Random::integer(0, max(n, min(500, n*(n-1) / 2 + 1))); + + Graph g(n); + g.erdosRenyi(m); + adj.assign(n, {}); + g.forEdges([&](int a, int b){ + addEdge(a, b); + }); + + bronKerbosch(); + naiveCliques.clear(); + naive(); + + sort(all(cliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();}); + sort(all(naiveCliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();}); + + if (cliques != naiveCliques) cerr << "got: " << sz(cliques) << ", expected: " << sz(naiveCliques) << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 55; +constexpr int M = N*(N-1) / 2 - 2*N; +void performance_test() { + timer t; + + Graph g(N); + g.erdosRenyi(M); + adj.assign(N, {}); + g.forEdges([&](int a, int b){ + addEdge(a, b); + }); + + t.start(); + bronKerbosch(); + t.stop(); + + hash_t hash = sz(cliques); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/centroid.cpp b/test/graph/centroid.cpp new file mode 100644 index 0000000..41d9d0f --- /dev/null +++ b/test/graph/centroid.cpp @@ -0,0 +1,77 @@ +#include "../util.h" +vector> adj; +#include + +int subtreeSize(int c, int p) { + int res = 1; + for (int x : adj[c]) { + if (x == p) continue; + res += subtreeSize(x, c); + } + return res; +} + +vector naive() { + vector res; + for (int i = 0; i < sz(adj); i++) { + bool isCentroid = true; + for (int j : adj[i]) isCentroid &= 2*subtreeSize(j, i) <= sz(adj); + if (isCentroid) res.push_back(i); + } + return res; +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(1, 50); + Graph g(n); + g.tree(); + + adj.assign(n, {}); + g.forEdges([&](int a, int b){ + adj[a].push_back(b); + adj[b].push_back(a); + }); + + auto expected = naive(); + sort(all(expected)); + + for (int i = 0; i < n; i++) { + auto [a, b] = find_centroid(i); + vector got; + if (a >= 0) got.push_back(a); + if (b >= 0) got.push_back(b); + sort(all(got)); + + if (got != expected) cerr << "error" << FAIL; + } + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 
2'000'000; +void performance_test() { + timer t; + Graph g(N); + g.tree(); + + adj.assign(N, {}); + g.forEdges([&](int a, int b){ + adj[a].push_back(b); + adj[b].push_back(a); + }); + + t.start(); + auto [gotA, gotB] = find_centroid(); + t.stop(); + hash_t hash = gotA + gotB; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/cycleCounting.cpp b/test/graph/cycleCounting.cpp new file mode 100644 index 0000000..8e53aec --- /dev/null +++ b/test/graph/cycleCounting.cpp @@ -0,0 +1,79 @@ +#include "../util.h" +#include +#include + +int naive(const vector>& edges, int n) { + int res = 0; + for (int i = 1; i < (1ll << sz(edges)); i++) { + vector deg(n); + init(n); + int cycles = 0; + for (int j = 0; j < sz(edges); j++) { + if (((i >> j) & 1) != 0) { + auto [a, b] = edges[j]; + deg[a]++; + deg[b]++; + if (findSet(a) != findSet(b)) { + unionSets(a, b); + } else { + cycles++; + } + } + } + bool ok = cycles == 1; + for (auto d : deg) ok &= d == 0 || d == 2; + if (ok) res++; + } + return res; +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 50'000; tries++) { + int n = Random::integer(1, 8); + int m = Random::integer(0, min(15, n*(n-1) / 2 + 1)); + + Graph g(n); + g.erdosRenyi(m); + vector> edges; + cycles cyc(n); + g.forEdges([&](int a, int b){ + edges.emplace_back(a, b); + cyc.addEdge(a, b); + }); + + int expected = naive(edges, n); + int got = cyc.count(); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 100; +constexpr int M = 20; +void performance_test() { + timer t; + + Graph g(N); + g.tree(); + g.erdosRenyi(M); + cycles cyc(N); + g.forEdges([&](int a, int b){ + cyc.addEdge(a, b); + }); + + t.start(); + hash_t hash = cyc.count(); + cerr << sz(cyc.base) << endl; + t.stop(); + + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/dijkstra.cpp b/test/graph/dijkstra.cpp new file mode 100644 index 0000000..c0cfb7e --- /dev/null +++ b/test/graph/dijkstra.cpp @@ -0,0 +1,64 @@ +#include "../util.h" +constexpr ll INF = LL::INF; +#include +struct edge { + int from, to; + ll cost; +}; +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(n-1, max(n, min(500, n*(n-1) / 2 + 1))); + + vector> adj(n); + vector edges; + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + ll w = Random::integer(1, 1'000'000'000'000ll); + adj[a].push_back({w, b}); + edges.push_back({a, b, w}); + }); + + for (int i = 0; i < n; i++) { + auto got = dijkstra(adj, i); + auto expected = bellmannFord(n, edges, i); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 500'000; +constexpr int M = 3'000'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + vector> adj(N); + g.forEdges([&](int a, int b){ + ll w1 = Random::integer(1, 1'000'000'000'000ll); + ll w2 = Random::integer(1, 1'000'000'000'000ll); + adj[a].push_back({w1, b}); + adj[b].push_back({w2, a}); + }); + + t.start(); + auto got 
= dijkstra(adj, 0); + t.stop(); + hash_t hash = 0; + for (auto x : got) hash += x; + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/dinicScaling.cpp b/test/graph/dinicScaling.cpp new file mode 100644 index 0000000..967d6b1 --- /dev/null +++ b/test/graph/dinicScaling.cpp @@ -0,0 +1,61 @@ +#include "../util.h" +namespace dinic { +#include +} + +namespace pushRelabel { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(n-1, max(n, min(500, n*(n-1) / 2 + 1))); + + dinic::adj.assign(n, {}); + pushRelabel::adj.assign(n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([](int a, int b){ + ll w = Random::integer(1, 1'000'000'000'000ll); + dinic::addEdge(a, b, w); + pushRelabel::addEdge(a, b, w); + }); + + ll got = dinic::maxFlow(0, n - 1); + ll expected = pushRelabel::maxFlow(0, n - 1); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 50000; +constexpr int M = 200000; +void performance_test() { + using namespace dinic; + timer t; + Graph g(N); + g.erdosRenyi(M); + adj.assign(N, {}); + g.forEdges([](int a, int b){ + ll w1 = Random::integer(1, 1'000'000'000'000ll); + ll w2 = Random::integer(1, 1'000'000'000'000ll); + addEdge(a, b, w1); + addEdge(b, a, w2); + }); + + t.start(); + hash_t hash = maxFlow(0, N - 1); + t.stop(); + if (t.time > 2000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/euler.cpp b/test/graph/euler.cpp new file mode 100644 index 0000000..6666040 --- /dev/null +++ b/test/graph/euler.cpp @@ -0,0 +1,87 @@ +#include "../util.h" +struct Euler { + Euler(int n) : idx(n), validIdx(n) {} +#include +}; + +Euler eulerGraph(int n, int m) { + Euler res(n); + + Graph g(n); + g.tree(); + g.forEdges([&](int a, int b) { + res.addEdge(a, b); + }); + + for (int i = n-1; i < m; i++) { + int a = Random::integer(0, n); + int b = Random::integer(0, n); + res.addEdge(a, b); + } + int last = -1; + for (int i = 0; i < n; i++) { + if (sz(res.idx[i]) % 2 != 0) { + if (last >= 0) { + res.addEdge(last, i); + last = -1; + } else { + last = i; + } + } + } + if (last >= 0) cerr << "FAIL" << FAIL; + + return res; +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(n-1, 200); + + auto g = eulerGraph(n, m); + + vector> expected(n); + for (int i = 0; i < n; i++) { + for (int j : g.idx[i]) { + expected[i].push_back(g.to[j]); + } + sort(all(expected[i])); + } + + g.euler(0); + vector> got(n); + if (g.cycle.front() != g.cycle.back()) cerr << "error: not cyclic" << FAIL; + for (int i = 1; i < sz(g.cycle); i++) { + int a = g.cycle[i-1]; + int b = g.cycle[i]; + got[a].push_back(b); + got[b].push_back(a); + } + for (auto& v : got) sort(all(v)); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 100'000; +constexpr int M = 1'000'000; +void performance_test() { + timer t; + auto g = eulerGraph(N, M); + t.start(); + g.euler(0); + t.stop(); + 
hash_t hash = 0; + for (int x : g.cycle) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/floydWarshall.cpp b/test/graph/floydWarshall.cpp new file mode 100644 index 0000000..a93a9ea --- /dev/null +++ b/test/graph/floydWarshall.cpp @@ -0,0 +1,90 @@ +#include "../util.h" +constexpr ll INF = LL::INF; +struct edge { + int from, to; + ll cost; +}; +#include +namespace floydWarshall { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(n-1, max(n, min(500, n*(n-1) / 2 + 1))); + vector potential = Random::integers(n, 0, 1'000'000'000'000ll); + + vector edges; + floydWarshall::dist.assign(n, vector(n, INF)); + for (int i = 0; i < n; i++) floydWarshall::dist[i][i] = 0; + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + ll w = Random::integer(1, 100'000'000'000ll); + w = potential[b] + w - potential[a]; + edges.push_back({a, b, w}); + floydWarshall::dist[a][b] = min(floydWarshall::dist[a][b], w); + }); + + vector> orig = floydWarshall::dist; + + floydWarshall::floydWarshall(); + for (int i = 0; i < n; i++) { + for (int j = 0; j < 10; j++) { + int k = Random::integer(0, n); + auto path = floydWarshall::getPath(i, k); + if (path.empty() != (floydWarshall::dist[i][k] == INF)) cerr << "error: reconstruction" << FAIL; + if (path.empty()) continue; + if (path.front() != i) cerr << "error: start" << FAIL; + if (path.back() != k) cerr << "error: end" << FAIL; + for (int l = 1; l < sz(path); l++) { + if (floydWarshall::dist[i][path[l-1]] + + orig[path[l-1]][path[l]] + + floydWarshall::dist[path[l]][k] != + floydWarshall::dist[i][k]) cerr << "error: edge" << FAIL; + } + } + } + + for (int i = 0; i < n; i++) { + auto got = floydWarshall::dist[i]; + auto expected = bellmannFord(n, edges, i); + + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 500; +constexpr int M = 20'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + floydWarshall::dist.assign(N, vector(N, INF)); + for (int i = 0; i < N; i++) floydWarshall::dist[i][i] = 0; + g.forEdges([&](int a, int b){ + ll w1 = Random::integer(1, 1'000'000'000'000ll); + ll w2 = Random::integer(1, 1'000'000'000'000ll); + floydWarshall::dist[a][b] = w1; + floydWarshall::dist[b][a] = w2; + }); + + t.start(); + floydWarshall::floydWarshall(); + t.stop(); + hash_t hash = 0; + for (auto x : floydWarshall::dist[42]) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/havelHakimi.cpp b/test/graph/havelHakimi.cpp new file mode 100644 index 0000000..71476ec --- /dev/null +++ b/test/graph/havelHakimi.cpp @@ -0,0 +1,65 @@ +#include "../util.h" +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 200'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, n*(n-1) / 2 + 1); + Graph g(n); + g.erdosRenyi(m); + + vector expected(n); + for (int i = 0; i < n; i++) expected[i] = g.deg(i); + + auto res = havelHakimi(expected); + if (sz(res) != n) cerr << "error: wrong number of nodes" << FAIL; + vector> rev(n); + 
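+		// rev[j] collects the opposite direction of every edge reported by
+		// havelHakimi, so the symmetry (undirectedness) check below can compare it to res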
vector got(n); + for (int i = 0; i < n; i++) { + got[i] = sz(res[i]); + for (int j : res[i]) { + if (j < 0 || j >= n) cerr << "error: invalid edge" << FAIL; + rev[j].push_back(i); + } + } + + for (int i = 0; i < n; i++) { + sort(all(res[i])); + sort(all(rev[i])); + if (res[i] != rev[i]) cerr << "error: graph is directed" << FAIL; + for (int j : res[i]) if (j == i) cerr << "error: graph has loop" << FAIL; + for (int j = 1; j < sz(res[i]); j++) { + if (res[i][j] == res[i][j-1]) cerr << "error: multiedge" << FAIL; + } + } + + if (expected != got) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 200'000; +constexpr int M = 1'000'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + + vector expected(N); + for (int i = 0; i < N; i++) expected[i] = g.deg(i); + + t.start(); + auto res = havelHakimi(expected); + t.stop(); + hash_t hash = 0; + for (auto& v : res) hash += sz(v); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/hopcroftKarp.cpp b/test/graph/hopcroftKarp.cpp new file mode 100644 index 0000000..05599dd --- /dev/null +++ b/test/graph/hopcroftKarp.cpp @@ -0,0 +1,74 @@ +#include "../util.h" +namespace kuhn { +#include +} +namespace hk { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 50'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, n*(n-1) / 2 + 1)); + + kuhn::adj.assign(2*n, {}); + hk::adj.assign(2*n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + kuhn::adj[a].push_back(n+b); + kuhn::adj[b+n].push_back(a); + + hk::adj[a].push_back(n+b); + hk::adj[b+n].push_back(a); + }); + + ll got = hk::hopcroft_karp(n); + ll expected = kuhn::kuhn(n); + + vector seen(2*n); + ll got2 = 0; + for (int i = 0; i < n; i++) { + int j = hk::pairs[i]; + if (j < 0) continue; + if (hk::pairs[j] != i) cerr << "error: inconsitent" << FAIL; + if (j == i) cerr << "error: invalid" << FAIL; + if (j < i) continue; + if (seen[i] || seen[j]) cerr << "error: invalid" << FAIL; + seen[i] = seen[j] = true; + got2++; + } + + if (got != got2) cerr << "got: " << got << ", got2: " << got2 << FAIL; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +//this is an easy graph... 
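+//random bipartite graphs are usually far from the worst case for
+//Hopcroft-Karp (O(E*sqrt(V)) in general), so the large instance below
+//is mainly a sanity check that it stays within a few hundred ms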
+constexpr int N = 100'000; +constexpr int M = 500'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + hk::adj.assign(2*N, {}); + g.forEdges([&](int a, int b){ + hk::adj[a].push_back(N+b); + hk::adj[b+N].push_back(a); + }); + + t.start(); + hash_t hash = hk::hopcroft_karp(N); + t.stop(); + if (t.time > 300) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/kruskal.cpp b/test/graph/kruskal.cpp new file mode 100644 index 0000000..f6245b9 --- /dev/null +++ b/test/graph/kruskal.cpp @@ -0,0 +1,91 @@ +#include "../util.h" +#include + +struct edge { + int from, to; + ll cost; + bool operator<(const edge& o) const { + return cost > o.cost; + } +}; +ll kruskal(vector& edges, int n) { + init(n); + #define Edge edge + #include + #undef Edge + return cost; +} + +ll prim(vector& edges, int n) { + vector>> adj(n); + for (auto [a, b, d] : edges) { + adj[a].emplace_back(d, b); + adj[b].emplace_back(d, a); + } + priority_queue> todo; + vector seen(n); + ll res = 0; + for (ll i = 0; i < n; i++) { + if (seen[i]) continue; + todo.push({0, i}); + while (!todo.empty()) { + auto [d, c] = todo.top(); + todo.pop(); + if (seen[c]) continue; + seen[c] = true; + res += d; + for (auto e : adj[c]) { + todo.push(e); + } + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(0, max(n, min(500, n*(n-1) / 2 + 1))); + + + Graph g(n); + g.erdosRenyi(m); + vector edges; + g.forEdges([&](int a, int b){ + ll w = Random::integer(-1'000'000'000ll, 1'000'000'000ll); + edges.push_back({a, b, w}); + }); + + ll got = kruskal(edges, n); + ll expected = prim(edges, n); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 500'000; +constexpr int M = 3'000'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + vector edges; + g.forEdges([&](int a, int b){ + ll w = Random::integer(-1'000'000'000ll, 1'000'000'000ll); + edges.push_back({a, b, w}); + }); + + t.start(); + hash_t hash = kruskal(edges, N); + t.stop(); + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/matching.cpp b/test/graph/matching.cpp new file mode 100644 index 0000000..b8fbc6c --- /dev/null +++ b/test/graph/matching.cpp @@ -0,0 +1,62 @@ +#include "../util.h" +namespace tutte { +void gauss(int n, ll mod); +#include +#include +#include +} +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 5'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, n*(n-1) / 2 + 1)); + + GM blossom(n); + srand(Random::rng()); + tutte::adj.assign(n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + tutte::adj[a].push_back(b); + tutte::adj[b].push_back(a); + + blossom.adj[a].push_back(b); + blossom.adj[b].push_back(a); + }); + + ll got = tutte::max_matching(); + ll expected = blossom.match(); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 125; 
+constexpr int M = 5'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + srand(Random::rng()); + tutte::adj.assign(N, {}); + g.forEdges([&](int a, int b){ + tutte::adj[a].push_back(b); + tutte::adj[b].push_back(a); + }); + + t.start(); + hash_t hash = tutte::max_matching(); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/maxCarBiMatch.cpp b/test/graph/maxCarBiMatch.cpp new file mode 100644 index 0000000..6d7fad0 --- /dev/null +++ b/test/graph/maxCarBiMatch.cpp @@ -0,0 +1,74 @@ +#include "../util.h" +namespace kuhn { +#include +} +namespace hk { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 50'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, n*(n-1) / 2 + 1)); + + kuhn::adj.assign(2*n, {}); + hk::adj.assign(2*n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([&](int a, int b){ + kuhn::adj[a].push_back(n+b); + kuhn::adj[b+n].push_back(a); + + hk::adj[a].push_back(n+b); + hk::adj[b+n].push_back(a); + }); + + ll got = kuhn::kuhn(n); + ll expected = hk::hopcroft_karp(n); + + vector seen(2*n); + ll got2 = 0; + for (int i = 0; i < n; i++) { + int j = kuhn::pairs[i]; + if (j < 0) continue; + if (kuhn::pairs[j] != i) cerr << "error: inconsitent" << FAIL; + if (j == i) cerr << "error: invalid" << FAIL; + if (j < i) continue; + if (seen[i] || seen[j]) cerr << "error: invalid" << FAIL; + seen[i] = seen[j] = true; + got2++; + } + + if (got != got2) cerr << "got: " << got << ", got2: " << got2 << FAIL; + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +//this is an easy graph... 
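+//Kuhn's augmenting-path matching is O(V*E) in the worst case, which is
+//why the performance instance below is much smaller than the
+//Hopcroft-Karp one; on random graphs it is usually far faster than that bound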
+constexpr int N = 10'000; +constexpr int M = 100'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + kuhn::adj.assign(2*N, {}); + g.forEdges([&](int a, int b){ + kuhn::adj[a].push_back(N+b); + kuhn::adj[b+N].push_back(a); + }); + + t.start(); + hash_t hash = kuhn::kuhn(N); + t.stop(); + if (t.time > 200) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/maxWeightBipartiteMatching.cpp b/test/graph/maxWeightBipartiteMatching.cpp new file mode 100644 index 0000000..d245405 --- /dev/null +++ b/test/graph/maxWeightBipartiteMatching.cpp @@ -0,0 +1,59 @@ +#include "../util.h" +#pragma GCC diagnostic ignored "-Wshadow" +namespace matching { + constexpr int N_LEFT = 1000; + constexpr int N_RIGHT = 1000; + constexpr double INF = LD::INF; + #include +} +namespace mcmf { + #include +} + + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + auto [l, r] = Random::pair(1, 30); + mcmf::MinCostFlow mcmf(l+r+2, 0, 1); + + for (int i = 0; i < l; i++) mcmf.addEdge(0, 2 + i, 1, 0); + for (int i = 0; i < r; i++) mcmf.addEdge(2 + l + i, 1, 1, 0); + for (int i = 0; i < l; i++) { + for (int j = 0; j < r; j++) { + matching::costs[i][j] = Random::integer(-100, 100); + mcmf.addEdge(2 + i, 2 + l + j, 1, -matching::costs[i][j]); + } + } + + double got = matching::match(l, r); + mcmf.mincostflow(); + ll expected = -mcmf.mincost; + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += l + r; + } + cerr << "tested random queries: " << queries << endl; +} + +void performance_test() { + using namespace matching; + timer t; + + for (int i = 0; i < N_LEFT; i++) { + for (int j = 0; j < N_RIGHT; j++) { + costs[i][j] = Random::integer(-100, 100); + } + } + + t.start(); + hash_t hash = match(N_LEFT, N_RIGHT); + t.stop(); + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/minCostMaxFlow.cpp b/test/graph/minCostMaxFlow.cpp new file mode 100644 index 0000000..8c92aa7 --- /dev/null +++ b/test/graph/minCostMaxFlow.cpp @@ -0,0 +1,68 @@ +#include "../util.h" +#pragma GCC diagnostic ignored "-Wshadow" +namespace matching { + constexpr int N_LEFT = 1000; + constexpr int N_RIGHT = 1000; + constexpr double INF = LD::INF; + #include +} +namespace mcmf { + #include +} + + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + auto [l, r] = Random::pair(1, 30); + mcmf::MinCostFlow mcmf(l+r+2, 0, 1); + + for (int i = 0; i < l; i++) mcmf.addEdge(0, 2 + i, 1, 0); + for (int i = 0; i < r; i++) mcmf.addEdge(2 + l + i, 1, 1, 0); + for (int i = 0; i < l; i++) { + for (int j = 0; j < r; j++) { + matching::costs[i][j] = Random::integer(-100, 100); + mcmf.addEdge(2 + i, 2 + l + j, 1, -matching::costs[i][j]); + } + } + + mcmf.mincostflow(); + ll got = -mcmf.mincost; + double expected = matching::match(l, r); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += l + r; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000; +constexpr int M = 10'000; +void performance_test() { + using namespace mcmf; + timer t; + + Graph g(N); + g.erdosRenyi(M); + MinCostFlow mcmf(N, 0, 1); + vector potential = 
Random::integers(N, 0, 1'000'000ll); + g.forEdges([&](int a, int b){ + ll c = Random::integer(1, 1000'000); + ll cost = Random::integer(0, 1000'000); + mcmf.addEdge(a, b, c, potential[b] + cost - potential[a]); + mcmf.addEdge(b, a, c, potential[a] + cost - potential[b]); + }); + + t.start(); + mcmf.mincostflow(); + t.stop(); + + hash_t hash = mcmf.mincost; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/pushRelabel.cpp b/test/graph/pushRelabel.cpp new file mode 100644 index 0000000..ac3b079 --- /dev/null +++ b/test/graph/pushRelabel.cpp @@ -0,0 +1,61 @@ +#include "../util.h" +namespace dinic { +#include +} + +namespace pushRelabel { +#include +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(n-1, max(n, min(500, n*(n-1) / 2 + 1))); + + dinic::adj.assign(n, {}); + pushRelabel::adj.assign(n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([](int a, int b){ + ll w = Random::integer(1, 1'000'000'000'000ll); + dinic::addEdge(a, b, w); + pushRelabel::addEdge(a, b, w); + }); + + ll got = pushRelabel::maxFlow(0, n - 1); + ll expected = dinic::maxFlow(0, n - 1); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 50000; +constexpr int M = 200000; +void performance_test() { + using namespace pushRelabel; + timer t; + Graph g(N); + g.erdosRenyi(M); + adj.assign(N, {}); + g.forEdges([](int a, int b){ + ll w1 = Random::integer(1, 1'000'000'000'000ll); + ll w2 = Random::integer(1, 1'000'000'000'000ll); + addEdge(a, b, w1); + addEdge(b, a, w2); + }); + + t.start(); + hash_t hash = maxFlow(0, N - 1); + t.stop(); + if (t.time > 300) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/scc.cpp b/test/graph/scc.cpp new file mode 100644 index 0000000..123050f --- /dev/null +++ b/test/graph/scc.cpp @@ -0,0 +1,92 @@ +#include "../util.h" +#include +#include + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(1, 30); + int m = Random::integer(0, max(1, min(100, n*(n-1) / 2 + 1))); + Graph g(n); + g.erdosRenyi(m); + + adj.assign(n, {}); + g.forEdges([](int a, int b){ + adj[a].push_back(b); + }); + scc(); + + vector tmp(n); + for (int i = 0; i < sz(sccs); i++) { + for (int x : sccs[i]) { + if (tmp[x]) cerr << "error: duclicate" << FAIL; + if (idx[x] != i) cerr << "error: inconsistent" << FAIL; + tmp[x] = true; + } + } + for (int i = 0; i < n; i++) { + if (!tmp[i]) cerr << "error: missing" << FAIL; + } + + init(n); + vector seen(n); + int tmpCounter = 0; + auto reach = [&](int a, int b) { + tmpCounter++; + seen[a] = tmpCounter; + vector todo = {a}; + while (seen[b] != tmpCounter && !todo.empty()) { + a = todo.back(); + todo.pop_back(); + g.forOut(a, [&](int /**/, int x){ + if (seen[x] != tmpCounter) { + seen[x] = tmpCounter; + todo.push_back(x); + } + }); + } + return seen[b] == tmpCounter; + }; + for (int a = 0; a < n; a++) { + for (int b = 0; b < a; b++) { + if (findSet(a) == findSet(b)) continue; + if (reach(a, b) && reach(b, a)) unionSets(a, b); + } + } + + for (int a = 0; a < 
n; a++) { + for (int b = 0; b <= a; b++) { + bool got = idx[a] == idx[b]; + bool expected = findSet(a) == findSet(b); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + } + } + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 500'000; +constexpr int M = 2'000'000; +void performance_test() { + timer t; + Graph g(N); + g.erdosRenyi(M); + adj.assign(N, {}); + g.forEdges([](int a, int b){ + adj[a].push_back(b); + }); + + t.start(); + scc(); + t.stop(); + hash_t hash = 0; + for (int x : idx) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/stoerWagner.cpp b/test/graph/stoerWagner.cpp new file mode 100644 index 0000000..2003f09 --- /dev/null +++ b/test/graph/stoerWagner.cpp @@ -0,0 +1,81 @@ +#include "../util.h" +constexpr ll INF = LL::INF; + +namespace stoerWagner { +#include + void addEdge(int u, int v, ll c) { + adj[u].push_back({u, v, c}); + adj[v].push_back({v, u, c}); + } +} + +namespace pushRelabel { +#include + ll minCut() { + ll res = INF; + for (int i = 0; i < sz(adj); i++) { + for (int j = 0; j < i; j++) { + if (i == j) continue; + res = min(res, maxFlow(i, j)); + for (auto& v : adj) { + for (auto& e : v) { + e.f = 0; + } + } + } + } + return res; + } +} + +void stress_test() { + ll queries = 0; + for (int tries = 0; tries < 5'000; tries++) { + int n = Random::integer(2, 30); + int m = Random::integer(n-1, max(n, min(500, n*(n-1) / 2 + 1))); + + stoerWagner::adj.assign(n, {}); + pushRelabel::adj.assign(n, {}); + + Graph g(n); + g.erdosRenyi(m); + g.forEdges([](int a, int b){ + ll w = Random::integer(1, 1'000'000'000'000ll); + stoerWagner::addEdge(a, b, w); + pushRelabel::addEdge(a, b, w); + pushRelabel::addEdge(b, a, w); + }); + + ll got = stoerWagner::stoer_wagner(); + ll expected = pushRelabel::minCut(); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 200; +constexpr int M = 10000; +void performance_test() { + using namespace stoerWagner; + timer t; + Graph g(N); + g.erdosRenyi(M); + adj.assign(N, {}); + g.forEdges([](int a, int b){ + ll w = Random::integer(1, 1'000'000'000'000ll); + addEdge(a, b, w); + }); + + t.start(); + hash_t hash = stoer_wagner(); + t.stop(); + if (t.time > 2000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/graph/treeIsomorphism.cpp b/test/graph/treeIsomorphism.cpp new file mode 100644 index 0000000..97f4df4 --- /dev/null +++ b/test/graph/treeIsomorphism.cpp @@ -0,0 +1,126 @@ +#include "../util.h" +struct tree { + tree(int n) : adj(n) {} + #include + #include + + pair treeLabel() { + auto [a, b] = find_centroid(0); + if (a >= 0) a = treeLabel(a); + if (b >= 0) b = treeLabel(b); + if (a > b) swap(a, b); + return {a, b}; + } +}; + +void stress_test_eq() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(1, 50); + Graph g(n); + g.tree(); + + tree t(n); + + g.forEdges([&](int a, int b){ + t.adj[a].push_back(b); + t.adj[b].push_back(a); + }); + auto [gotA, gotB] = t.treeLabel(); + + g.permutate(); + t.adj.assign(n, {}); + g.forEdges([&](int a, int b){ + 
t.adj[a].push_back(b); + t.adj[b].push_back(a); + }); + auto [expectedA, expectedB] = t.treeLabel(); + + if (gotA != expectedA) cerr << "got: " << gotA << ", expected: " << expectedA << FAIL; + if (gotB != expectedB) cerr << "got: " << gotB << ", expected: " << expectedB << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +void test_tiny() { + vector expected = {1,1,1,1,2,3,6,11,23}; //#A000055 + for (int i = 1; i < sz(expected); i++) { + set> got; + tree t(i); + + int labeled = 1; + for (int j = 3; j < i; j++) labeled *= i; + for (int j = 0; j < 10 * labeled; j++) { + Graph g(i); + g.tree(); + + t.adj.assign(i, {}); + g.forEdges([&](int a, int b){ + t.adj[a].push_back(b); + t.adj[b].push_back(a); + }); + + got.insert(t.treeLabel()); + } + if (sz(got) != expected[i]) cerr << i << ", got: " << sz(got) << ", expected: " << expected[i] << FAIL; + } + cerr << "tested tiny: " << sz(expected) << endl; +} + +void stress_test_neq() { + ll queries = 0; + for (int tries = 0; tries < 100'000; tries++) { + int n = Random::integer(20, 50); + Graph g(n); + g.tree(); + + tree t(n); + + g.forEdges([&](int a, int b){ + t.adj[a].push_back(b); + t.adj[b].push_back(a); + }); + auto [gotA, gotB] = t.treeLabel(); + + g.clear().tree(); + t.adj.assign(n, {}); + g.forEdges([&](int a, int b){ + t.adj[a].push_back(b); + t.adj[b].push_back(a); + }); + auto [expectedA, expectedB] = t.treeLabel(); + + if (gotA == expectedA && gotA >= 0) cerr << "error: " << n << ", " << tries << FAIL; + if (gotB == expectedB) cerr << "error: " << n << ", " << tries << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 500'000; +void performance_test() { + timer t; + Graph g(N); + g.tree(); + + tree tt(N); + g.forEdges([&](int a, int b){ + tt.adj[a].push_back(b); + tt.adj[b].push_back(a); + }); + + t.start(); + auto [gotA, gotB] = tt.treeLabel(); + t.stop(); + hash_t hash = gotA + gotB; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test_eq(); + test_tiny(); + stress_test_neq(); + performance_test(); +} diff --git a/test/math/berlekampMassey.cpp b/test/math/berlekampMassey.cpp new file mode 100644 index 0000000..58fd143 --- /dev/null +++ b/test/math/berlekampMassey.cpp @@ -0,0 +1,68 @@ +#include "../util.h" +#include +#include + +struct RandomRecurence { + vector f, c, cache; + RandomRecurence(int n) : f(Random::integers(n, 0, mod)), c(Random::integers(n, 0, mod)), cache(f) {} + RandomRecurence(const vector& f_, const vector& c_) : c(c_), cache(f_) { + if (cache.size() < c.size()) cerr << "wrong size" << FAIL; + cache.resize(c.size()); + f = cache; + } + + ll operator()(ll k){ + while (sz(cache) <= k) { + ll cur = 0; + for (ll i = 0; i < sz(c); i++) { + cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + } + cur %= mod; + cache.push_back(cur); + } + return cache[k]; + } +}; + +void stress_test() { + int queries = 0; + for (int i = 0; i < 50'000; i++) { + int n = Random::integer(1, 10); + RandomRecurence expected(n); + + ll k = Random::integer(2*n, 100); + vector s(k); + for (ll j = 0; j < k; j++) s[j] = expected(j); + + auto res = BerlekampMassey(s); + RandomRecurence got(s, res); + + for (ll j = 0; j < 3*k; j++) { + if (got(j) != expected(j)) cerr << "got: " << got(j) << ", expected: " << expected(j) << FAIL; + } + + queries += k; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 5'000; 
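+//Berlekamp-Massey needs at least 2n terms to recover an order-n recurrence,
+//hence k >= 2*n in the stress test and the f(2*N) call below; e.g. from
+//1 1 2 3 5 8 13 21 it should recover the coefficients {1, 1},
+//i.e. s[k] = s[k-1] + s[k-2]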
+void performance_test() { + timer t; + RandomRecurence f(N); + f(2*N); + t.start(); + auto res = BerlekampMassey(f.cache); + t.stop(); + hash_t hash = 0; + for (ll x : res) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/bigint.cpp b/test/math/bigint.cpp new file mode 100644 index 0000000..3fc4ac1 --- /dev/null +++ b/test/math/bigint.cpp @@ -0,0 +1,122 @@ +#include "../util.h" +#include + +template +struct modInt { + ll value = 0; + modInt() {} + modInt(const bigint& x) { + stringstream a; + a << x; + string b = a.str(); + for (ll i = b[0] == '-' ? 1 : 0; i < sz(b); i++) { + value *= 10; + value += b[i] - '0'; + value %= MOD; + } + if (b[0] == '-') value = (MOD - value) % MOD; + } + + modInt(ll x) : value(((x % MOD) + MOD) % MOD) {} + + modInt operator+(modInt o) const {return value + o.value;} + modInt operator-(modInt o) const {return value - o.value;} + modInt operator*(modInt o) const {return value * o.value;} + + modInt& operator+=(modInt o) {return *this = *this + o;} + modInt& operator-=(modInt o) {return *this = *this - o;} + modInt& operator*=(modInt o) {return *this = *this * o;} + + ll& operator*() {return value;} + bool operator==(const modInt& o) const {return value == o.value;} + bool operator!=(const modInt& o) const {return value != o.value;} +}; + +constexpr ll MOD = 1'394'633'899; +constexpr ll POOL = 8; + +void stress_test() { + int queries = 0; + for (int tries = 0; tries < 1000; tries++) { + vector> expectedPool(POOL); + vector gotPool(POOL); + for (int i = 0; i < POOL; i++) { + ll x = Random::integer(-1'000'000'000'000'000'000ll, 1'000'000'000'000'000'000ll); + expectedPool[i] = x; + gotPool[i] = x; + if (expectedPool[i] != modInt(gotPool[i])) cerr << "error: 0" << FAIL; + } + for (int i = 0; i < 200; i++) { + int a = Random::integer(0, POOL); + int b = Random::integer(0, POOL); + int o = Random::integer(0, 3); + + if (Random::integer(0, 2) == 0) {//x= + auto tmpExpected = expectedPool[a]; + auto tmpGot = gotPool[a]; + + if (o == 0) { + tmpExpected += expectedPool[b]; + tmpGot += gotPool[b]; + } + if (o == 1) { + tmpExpected -= expectedPool[b]; + tmpGot -= gotPool[b]; + } + if (o == 2) { + tmpExpected -= expectedPool[b]; + tmpGot -= gotPool[b]; + } + + if (tmpExpected != modInt(tmpGot)) { + cerr << gotPool[a]; + if (o == 0) cerr << "+"; + if (o == 1) cerr << "-"; + if (o == 2) cerr << "*"; + cerr << gotPool[b] << "=" << tmpGot << endl; + cerr << "error: 1" << FAIL; + } + + expectedPool[b] = tmpExpected; + gotPool[b] = tmpGot; + } else {//x + int c = Random::integer(0, POOL); + + modInt tmpExpected; + bigint tmpGot; + + if (o == 0) { + tmpExpected = expectedPool[a] + expectedPool[b]; + tmpGot = gotPool[a] + gotPool[b]; + } + if (o == 1) { + tmpExpected = expectedPool[a] - expectedPool[b]; + tmpGot = gotPool[a] - gotPool[b]; + } + if (o == 2) { + tmpExpected = expectedPool[a] * expectedPool[b]; + tmpGot = gotPool[a] * gotPool[b]; + } + + if (tmpExpected != modInt(tmpGot)) { + cerr << gotPool[a]; + if (o == 0) cerr << "+"; + if (o == 1) cerr << "-"; + if (o == 2) cerr << "*"; + cerr << gotPool[b] << "=" << tmpGot << endl; + cerr << "error: 2" << FAIL; + } + + expectedPool[c] = tmpExpected; + gotPool[c] = tmpGot; + } + queries++; + } + } + cerr << "tested random queries: " << queries << endl; +} + +int main() { + stress_test(); +} + diff --git a/test/math/binomial0.cpp 
b/test/math/binomial0.cpp new file mode 100644 index 0000000..00c04d4 --- /dev/null +++ b/test/math/binomial0.cpp @@ -0,0 +1,31 @@ +#include "../util.h" +#include +#include +constexpr ll mod = 1'394'633'899; +#include + + +void stress_test() { + vector last = {1}; + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + for (ll j = 0; j <= i; j++) { + ll got = calc_binom(i, j); + ll expected = last[j]; + if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + } + queries += sz(last); + + last.push_back(1); + for (ll j = i; j > 0; j--) { + last[j] = (last[j] + last[j - 1]) % mod; + } + } + cerr << "tested queries: " << queries << endl; +} + +int main() { + precalc(); + stress_test(); +} + diff --git a/test/math/binomial1.cpp b/test/math/binomial1.cpp new file mode 100644 index 0000000..f6fe20b --- /dev/null +++ b/test/math/binomial1.cpp @@ -0,0 +1,27 @@ +#include "../util.h" +#include + + +void stress_test() { + vector last = {1}; + ll queries = 0; + for (ll i = 0; i <= 61; i++) { + for (ll j = 0; j <= i; j++) { + ll got = calc_binom(i, j); + ll expected = last[j]; + if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + } + queries += sz(last); + + last.push_back(1); + for (ll j = i; j > 0; j--) { + last[j] = last[j] + last[j - 1]; + } + } + cerr << "tested queries: " << queries << endl; +} + +int main() { + stress_test(); +} + diff --git a/test/math/binomial2.cpp b/test/math/binomial2.cpp new file mode 100644 index 0000000..b55c8af --- /dev/null +++ b/test/math/binomial2.cpp @@ -0,0 +1,29 @@ +#include "../util.h" +#include +#include + + +void stress_test() { + vector last = {1}; + ll queries = 0; + for (ll i = 0; i <= 1000; i++) { + for (ll j = 0; j <= i; j++) { + ll got = calc_binom(i, j); + ll expected = last[j]; + if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + } + queries += sz(last); + + last.push_back(1); + for (ll j = i; j > 0; j--) { + last[j] = (last[j] + last[j - 1]) % mod; + } + } + cerr << "tested queries: " << queries << endl; +} + +int main() { + primeSieve(); + stress_test(); +} + diff --git a/test/math/binomial3.cpp b/test/math/binomial3.cpp new file mode 100644 index 0000000..4a99689 --- /dev/null +++ b/test/math/binomial3.cpp @@ -0,0 +1,31 @@ +#include "../util.h" +#include +#include +#include + + +constexpr ll mod = 503; + +void stress_test() { + vector last = {1}; + ll queries = 0; + for (ll i = 0; i < mod; i++) { + for (ll j = 0; j <= i; j++) { + ll got = calc_binom(i, j, mod); + ll expected = last[j]; + if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL; + } + queries += sz(last); + + last.push_back(1); + for (ll j = i; j > 0; j--) { + last[j] = (last[j] + last[j - 1]) % mod; + } + } + cerr << "tested queries: " << queries << endl; +} + +int main() { + stress_test(); +} + diff --git a/test/math/chineseRemainder.cpp b/test/math/chineseRemainder.cpp new file mode 100644 index 0000000..26e71de --- /dev/null +++ b/test/math/chineseRemainder.cpp @@ -0,0 +1,47 @@ +#include "../util.h" +#include +#include + +struct NAIVE { + vector> added; + void add(ll a, ll m) { + added.emplace_back(a, m); + } + ll sol() const { + ll n = 1; + for (auto [_, x] : added) n = lcm(n, x); + for (ll i = 0; i < n; i++) { + bool ok = true; + for (auto [a, m] : added) { + ok &= (i % m) == a; + } + if (ok) return i; + } + 
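+		// no x in [0, lcm of the moduli) satisfies every congruence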
return -1; + } +}; + +void stress_test() { + ll queries = 0; + ll withSol = 0; + for (ll i = 0; i < 100'000; i++) { + CRT crt; + NAIVE naive; + for (ll j = 0; j < 3; j++) { + int m = Random::integer(1, 50); + int a = Random::integer(0, m); + crt.add(a, m); + naive.add(a, m); + } + if (crt.hasSol != (naive.sol() >= 0)) cerr << "error" << FAIL; + if (crt.hasSol && crt.sol != naive.sol()) cerr << "got: " << (ll)crt.sol << ", expected: " << naive.sol() << FAIL; + queries += crt.M; + withSol += crt.hasSol; + } + cerr << "tested queries: " << queries << "(" << withSol << ")" << endl; +} + +int main() { + stress_test(); +} + diff --git a/test/math/cycleDetection.cpp b/test/math/cycleDetection.cpp new file mode 100644 index 0000000..bf57aed --- /dev/null +++ b/test/math/cycleDetection.cpp @@ -0,0 +1,47 @@ +#include "../util.h" +#include +#include + +pair naive(ll x0, function f) { + map seen; + ll d = 0; + while (seen.find(x0) == seen.end()) { + seen[x0] = d; + d++; + x0 = f(x0); + } + return {seen[x0], d - seen[x0]}; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 1000'000; i++) { + int m = Random::integer(1, 100); + int c = Random::integer(0, m); + auto f = [&](ll x){return (x*x + c) % m;}; + int x0 = Random::integer(0, m); + auto got = cycleDetection(x0, f); + auto expected = naive(x0, f); + if (got != expected) cerr << "error: " << got.first << " " << got.second << " " << expected.first << " " << expected.second << FAIL; + queries += got.second; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr ll M = 18'086'183; +void performance_test() { + timer t; + auto f = [&](ll x){return (1337*x + 42) % M;}; + t.start(); + auto [a, b] = cycleDetection(42, f); + t.stop(); + hash_t hash = a + b; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/discreteLogarithm.cpp b/test/math/discreteLogarithm.cpp new file mode 100644 index 0000000..0f9eecf --- /dev/null +++ b/test/math/discreteLogarithm.cpp @@ -0,0 +1,64 @@ +#include "../util.h" +#include +#include + +ll overwrite = 0; +ll getMemory(ll /**/) {return overwrite - 1;} //dlog code adds one... 
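+//overriding sqrtl below forces the block size that the dlog code derives
+//from sqrtl(p) to 'overwrite', so main can also stress test correctness
+//with deliberately too small and too large block sizes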
+#define sqrtl getMemory +#include +#undef sqrtl + +template +void stress_test(F&& f) { + ll work = 0; + for (ll tries = 0; tries < 3'000; tries++) { + ll p = Random::prime(1'000); + overwrite = f(p); + ll a = Random::integer(1, p); + vector naive(p); + for (ll i = 0, j = 1; i < p; i++, j = (j * a) % p) { + naive[j] = true; + } + for (ll b = 0; b < p; b++) { + ll got = dlog(a, b, p); + if (got < -1 || got >= p) cerr << "error: out of range" << FAIL; + if ((got >= 0) != naive[b]) { + cerr << a << " " << b << " " << p << endl; + cerr << got << endl; + cerr << "error" << FAIL; + } + if (got >= 0 && powMod(a, got, p) != b) { + cerr << a << "^" << got << " = " << powMod(a, got, p) << " != " << b << " % " << p << endl; + cerr << "error: wrong" << FAIL; + } + work++; + } + } + cerr << "stress tested: " << work << endl; +} + +constexpr int N = 25; +constexpr ll mod = 1'394'633'899; +void performance_test() { + timer t; + hash_t hash = 0; + overwrite = sqrt(mod); + for (int operations = 0; operations < N; operations++) { + ll a = Random::integer(1, mod); + ll b = Random::integer(0, mod); + t.start(); + hash += dlog(a, b, mod); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + stress_test([](ll p){return sqrtl(p);}); + stress_test([](ll p){return min(10, p - 1);}); + stress_test([](ll p){return min(p - 1, sqrtl(p) + 100);}); + performance_test(); +} + diff --git a/test/math/discreteNthRoot.cpp b/test/math/discreteNthRoot.cpp new file mode 100644 index 0000000..d595e6d --- /dev/null +++ b/test/math/discreteNthRoot.cpp @@ -0,0 +1,78 @@ +#include "../util.h" +#define ll lll +#include +#undef ll +#include +#include + +ll phi(ll pk, ll p, ll /*k*/) {return pk - pk / p;} +ll phi(ll n) { // O(sqrt(n)) + ll res = 1; + for (ll p = 2; p * p <= n; p++) { + if (n % p == 0) { + ll pk = 1; + ll k = 0; + do { + n /= p; + pk *= p; + k++; + } while (n % p == 0); + res *= phi(pk, p, k); + }} + if (n > 1) res *= phi(n, n, 1); + return res; +} + +#include +#include +#include + +//x^a=b mod m +ll naiveRoot(ll a, ll b, ll m) { + for (ll i = 0; i < m; i++) { + if (powMod(i, a, m) == b) return i; + } + return -1; +} + +void stress_test() { + int queries = 0; + int found = 0; + for (int tries = 0; tries < 50'000; tries++) { + ll p = Random::prime(0, 1000); + ll a = Random::integer(1, p); + ll b = Random::integer(1, p); + + ll got = root(a, b, p); + ll expected = naiveRoot(a, b, p); + + if (got < -1 || got >= p) cerr << "error: out of range" << FAIL; + if (got >= 0 && powMod(got, a, p) != b) cerr << "error: wrong" << FAIL; + if ((got >= 0) != (expected >= 0)) cerr << "error" << FAIL; + queries++; + if (expected >= 0) found++; + } + cerr << "tested random queries: " << queries << " (" << found << ")" << endl; +} + +constexpr int N = 50; +constexpr ll mod = 1'394'633'899; +void performance_test() { + timer t; + hash_t hash = 0; + for (int i = 0; i < N; i++) { + ll a = Random::integer(1, mod); + ll b = Random::integer(1, mod); + t.start(); + hash += root(a, b, mod); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/divisors.cpp b/test/math/divisors.cpp new file mode 100644 index 0000000..2402d2a --- /dev/null +++ b/test/math/divisors.cpp @@ -0,0 +1,65 @@ +#include "../util.h" +#define ll lll +#include 
+#undef ll +#include + +bool isSquare(ll x) { + ll r = sqrtl(x); + while (r*r > x) r--; + while ((r+1)*(r+1) <= x) r++; + return r*r==x; +} + +#include + +ll naive(ll x) { + ll res = 0; + for (ll i = 1; i*i <= x; i++) { + if (x % i == 0) { + res++; + if (i*i != x) res++; + } + } + return res; +} + +void stress_test() { + ll work = 0; + for (ll i = 0; i < 1'000; i++) { + ll x = Random::integer(1, 1'000'000'000'000); + auto got = countDivisors(x); + auto expected = naive(x); + if (got != expected) cerr << "error: " << x << FAIL; + work += sqrt(x); + } + for (ll i = 0; i < 100'000; i++) { + ll x = Random::integer(1, 1'000'000); + auto got = countDivisors(x); + auto expected = naive(x); + if (got != expected) cerr << "error: " << x << FAIL; + work += sqrt(x); + } + cerr << "stress tested: " << work << endl; +} + +constexpr int N = 200; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll x = Random::integer(1e18 / 2, 1e18); + t.start(); + hash += countDivisors(x); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/extendedEuclid.cpp b/test/math/extendedEuclid.cpp new file mode 100644 index 0000000..597f722 --- /dev/null +++ b/test/math/extendedEuclid.cpp @@ -0,0 +1,41 @@ +#include "../util.h" +#include + +void stress_test() { + if (extendedEuclid(0, 0)[0] != 0) cerr << "error: extendedEuclid(0, 0)" << FAIL; + ll queries = 0; + timer t; + for (int i = 0; i < 1'000'000; i++) { + ll a = Random::integer(0, 1'000'000'000); + ll b = 0; + { + t.start(); + auto [got, x, y] = extendedEuclid(a, b); + t.stop(); + ll expected = std::gcd(a, b); + if (got != expected) cerr << "gcd(" << a << ", " << b << "), got: " << got << ", expected: " << expected << FAIL; + if (abs(x) >= max(2, abs(b))) cerr << "invalid x" << FAIL; + if (abs(y) >= max(2, abs(a))) cerr << "invalid y" << FAIL; + if (a*x + b*y != expected) cerr << "invalid x or y" << FAIL; + } + b = Random::integer(0, 1'000'000'000); + { + t.start(); + auto [got, x, y] = extendedEuclid(a, b); + t.stop(); + ll expected = std::gcd(a, b); + if (got != expected) cerr << "gcd(" << a << ", " << b << "), got: " << got << ", expected: " << expected << FAIL; + if (abs(x) >= max(2, abs(b))) cerr << "invalid x" << FAIL; + if (abs(y) >= max(2, abs(a))) cerr << "invalid y" << FAIL; + if (a*x + b*y != expected) cerr << "invalid x or y" << FAIL; + } + queries++; + } + cerr << "tested random queries: " << queries << endl; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms" << endl; +} + +int main() { + stress_test(); +} diff --git a/test/math/gauss.cpp b/test/math/gauss.cpp new file mode 100644 index 0000000..37bacce --- /dev/null +++ b/test/math/gauss.cpp @@ -0,0 +1,118 @@ +#include "../util.h" +constexpr double EPS = 1e-9; +constexpr int UNIQUE = 1; +constexpr int INCONSISTENT = 2; +constexpr int MULTIPLE = 3; +vector> mat; +#include + +vector> inverseMat(const vector>& m) { + int n = sz(m); + mat = m; + for (int i = 0; i < n; i++) { + if (sz(mat[i]) != n) cerr << "error: no square matrix" << FAIL; + mat[i].resize(2*n); + mat[i][n+i] = 1; + } + gauss(n);//the unique cetc. checks are not usefull since we dont solve an lgs... 
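+	// gauss row-reduced the augmented matrix [m | I]; if m has full rank the
+	// right half should now hold m^-1 (the loop below checks that the left
+	// half really became diagonal, and the caller verifies m * inv ~ I)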
+ vector> res(m); + for (int i = 0; i < n; i++) { + res[i] = vector(mat[i].begin() + n, mat[i].end()); + for (int j = 0; j < n; j++) { + if (j != i && mat[i][j] != 0) cerr << "error: not full rank?" << FAIL; + if (j == i && mat[i][j] == 0) cerr << "error: not full rank?" << FAIL; + } + } + return res; +} + +vector> mul(const vector>& a, const vector>& b) { + int n = sz(a); + int m = sz(b[0]); + int x = sz(b); + if (sz(a[0]) != sz(b)) cerr << "error: wrong dimensions" << FAIL; + vector> res(n, vector(m)); + for (int i = 0; i < n; i++) { + for (int j = 0; j < m; j++) { + for (int k = 0; k < x; k++) { + res[i][j] += a[i][k] * b[k][j]; + } + } + } + return res; +} + +void test_tiny() { + mat = { + {1, 2, 3, 4}, + {0, 5, 6, 7}, + {0, 0, 8, 9}, + }; + if (gauss(sz(mat)) != UNIQUE) cerr << "error: 1" << FAIL; + + mat = { + {-1, 1, 0, -1}, + { 2, 6, 0, 10}, + { 1, -2, 0, 0}, + }; + if (gauss(sz(mat)) != MULTIPLE) cerr << "error: 2" << FAIL; + + mat = { + {-1, 1, 0, -1}, + { 2, 6, 0, 10}, + { 1, -2, 0, 1}, + }; + if (gauss(sz(mat)) != INCONSISTENT) cerr << "error: 3" << FAIL; +} + +void stress_test_inv() { + ll queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + int n = Random::integer(1, 30); + + vector> m(n); + for (auto& v : m) v = Random::reals(n, 0, 1'000); + // m hopefully has full rank... + + auto inv = inverseMat(m); + + auto prod = mul(m, inv); + + for (int i = 0; i < n; i++) { + for (int j = 0; j < n; j++) { + if (i == j && abs(prod[i][j] - 1) >= EPS) cerr << "error: not inverted " << prod[i][j] << FAIL; + if (i != j && abs(prod[i][j] - 0) >= EPS) cerr << "error: not inverted " << prod[i][j] << FAIL; + } + } + + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 250; +void performance_test() { + timer t; + + vector> m(N); + for (auto& v : m) v = Random::reals(N, 0, 1'000); + mat = m; + + t.start(); + gauss(N); + t.stop(); + hash_t hash = 0; + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { + hash += mat[i][j]; + } + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + test_tiny(); + stress_test_inv(); + performance_test(); +} diff --git a/test/math/gcd-lcm.cpp b/test/math/gcd-lcm.cpp new file mode 100644 index 0000000..294095b --- /dev/null +++ b/test/math/gcd-lcm.cpp @@ -0,0 +1,46 @@ +#include "../util.h" +#include + +void stress_test() { + if (::gcd(0, 0) != 0) cerr << "error: gcd(0, 0)" << FAIL; + if (::lcm(0, 0) != 0) cerr << "error: lcm(0, 0)" << FAIL; + ll queries = 0; + timer t; + for (int i = 0; i < 1'000'000; i++) { + ll a = Random::integer(0, 1'000'000'000); + ll b = 0; + { + ll got = ::gcd(a, b); + ll expected = std::gcd(a, b); + if (got != expected) cerr << "gcd(" << a << ", " << b << "), got: " << got << ", expected: " << expected << FAIL; + } + { + ll got = ::lcm(a, b); + ll expected = std::lcm(a, b); + if (got != expected) cerr << "lcm(" << a << ", " << b << "), got: " << got << ", expected: " << expected << FAIL; + } + b = Random::integer(0, 1'000'000'000); + { + t.start(); + ll got = ::gcd(a, b); + t.stop(); + ll expected = std::gcd(a, b); + if (got != expected) cerr << "gcd(" << a << ", " << b << "), got: " << got << ", expected: " << expected << FAIL; + } + { + t.start(); + ll got = ::lcm(a, b); + t.stop(); + ll expected = std::lcm(a, b); + if (got != expected) cerr << "lcm(" << a << ", " << b << "), got: " << got << ", expected: " << expected << FAIL; + } + queries++; + } + cerr << 
"tested random queries: " << queries << endl; + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms" << endl; +} + +int main() { + stress_test(); +} diff --git a/test/math/goldenSectionSearch.cpp b/test/math/goldenSectionSearch.cpp new file mode 100644 index 0000000..565a21c --- /dev/null +++ b/test/math/goldenSectionSearch.cpp @@ -0,0 +1,74 @@ +#include "../util.h" +#include + +struct RandomFunction { + ld min; + vector> polys; + RandomFunction(ld l, ld r) : min(Random::real(l, r)) { + do { + polys.emplace_back( + Random::real(0, 1e9), + 2 * Random::integer(1, 5) + ); + } while(false && Random::integer(4) != 0); + } + + ld operator()(ld x){ + ld res = 0; + for (auto [m, p] : polys) { + res += m * pow(x - min, p); + } + return res; + } + + friend ostream& operator<<(ostream& os, const RandomFunction& f) { + string plus = ""; + for (auto [m, p] : f.polys) { + os << setprecision(15) << plus << m << "*(x-" << f.min << ")**" << p; + plus = "+"; + } + return os; + } +}; + +void stress_test() { + int queries = 0; + for (int i = 0; i < 50'000; i++) { + ld l = Random::real(-200, 200); + ld r = Random::real(-200, 200); + if (l > r) swap(l, r); + + RandomFunction f(l, r); + + ld got = gss(l, r, f); + ld expected = f.min; + if (float_error(got, expected) > 1e-6) { + cerr << f << endl; + cerr << "got: " << got << ", expected: " << expected << FAIL; + } + queries++; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 10'000; +void performance_test() { + timer t; + RandomFunction f(-200, 200); + f.polys.resize(1); + + hash_t hash = 0; + for (int i = 0; i < N; i++) { + t.start(); + hash += gss(-200, 200, f); + t.stop(); + } + if (t.time > 1000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/inversions.cpp b/test/math/inversions.cpp new file mode 100644 index 0000000..d2a54b7 --- /dev/null +++ b/test/math/inversions.cpp @@ -0,0 +1,43 @@ +#include "../util.h" +#include +#include + +ll naive(const vector& v) { + ll res = 0; + for (ll i = 0; i < sz(v); i++) { + for (ll j = 0; j < i; j++) { + if (v[j] > v[i]) res++; + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 100'000; i++) { + int n = Random::integer(1, 100); + auto v = Random::integers(n, -50, 50); + ll got = inversions(v); + ll expected = naive(v); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 200'000; +void performance_test() { + timer t; + auto v = Random::integers(N, -10'000, 10'000); + t.start(); + hash_t hash = inversions(v); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/inversionsMerge.cpp b/test/math/inversionsMerge.cpp new file mode 100644 index 0000000..85ab0d2 --- /dev/null +++ b/test/math/inversionsMerge.cpp @@ -0,0 +1,46 @@ +#include "../util.h" +#include + +ll naive(const vector& v) { + ll res = 0; + for (ll i = 0; i < sz(v); i++) { + for (ll j = 0; j < i; j++) { + if (v[j] > v[i]) res++; + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 100'000; i++) { + int n = Random::integer(1, 100); + vector 
v(n); + for (ll j = 0; j < n; j++) v[j] = (j-10) * 100000 + Random::integer(0, 10000);//values must be unique ): + shuffle(all(v), Random::rng); + ll expected = naive(v); + ll got = mergeSort(v); + if (got != expected) { + cerr << "got: " << got << ", expected: " << expected << FAIL; + } + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 2'000'000; //10 times faster +void performance_test() { + timer t; + auto v = Random::integers(N, -10'000, 10'000); + t.start(); + hash_t hash = mergeSort(v); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/kthperm.cpp b/test/math/kthperm.cpp new file mode 100644 index 0000000..16691b9 --- /dev/null +++ b/test/math/kthperm.cpp @@ -0,0 +1,38 @@ +#include "../util.h" +#include +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 100); + vector expected(n); + iota(all(expected), 0); + ll k = 0; + do { + auto got = kthperm(n, k); + if (got != expected) cerr << "error" << FAIL; + k++; + } while (k < 100 && next_permutation(all(expected))); + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 500'000; +void performance_test() { + timer t; + t.start(); + auto got = kthperm(N, 4'168'751'907'498'170ll); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * got[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/kthperm_permIndex.cpp b/test/math/kthperm_permIndex.cpp new file mode 100644 index 0000000..d84524e --- /dev/null +++ b/test/math/kthperm_permIndex.cpp @@ -0,0 +1,21 @@ +#include "../util.h" +#include +#include +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + ll n = Random::integer(20, 1000); + ll expected = Random::integer(0, 1'000'000'000'000'000'000); + ll got = permIndex(kthperm(n, expected)); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +int main() { + stress_test(); +} + diff --git a/test/math/legendre.cpp b/test/math/legendre.cpp new file mode 100644 index 0000000..f210b57 --- /dev/null +++ b/test/math/legendre.cpp @@ -0,0 +1,43 @@ +#include "../util.h" +#define ll lll +#include +#undef ll +#include + +void stress_test() { + ll work = 0; + for (ll i = 0; i < 5'000; i++) { + ll p = Random::prime(5'000); + vector isSquare(p); + for (ll j = 1; j < p; j++) isSquare[(j*j) % p] = true; + for (ll j = 0; j < p; j++) { + auto got = legendre(j, p); + auto expected = j == 0 ? 0 : (isSquare[j] ? 
1 : -1); + if (got != expected) cerr << "error: " << j << " " << p << FAIL; + } + work += p; + } + cerr << "stress tested: " << work << endl; +} + +constexpr int N = 1'000'000; +constexpr ll mod = 1'394'633'899; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll j = Random::integer(mod); + t.start(); + hash += legendre(j, mod); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/lgsFp.cpp b/test/math/lgsFp.cpp new file mode 100644 index 0000000..08f8f84 --- /dev/null +++ b/test/math/lgsFp.cpp @@ -0,0 +1,118 @@ +#include "../util.h" +#include +vector> mat; +#include + +constexpr ll mod = 1'000'000'007; + +vector> inverseMat(const vector>& m) { + int n = sz(m); + mat = m; + for (int i = 0; i < n; i++) { + if (sz(mat[i]) != n) cerr << "error: no square matrix" << FAIL; + mat[i].resize(2*n); + mat[i][n+i] = 1; + } + gauss(n, mod); + vector> res(m); + for (int i = 0; i < n; i++) { + res[i] = vector(mat[i].begin() + n, mat[i].end()); + for (int j = 0; j < n; j++) { + if (j != i && mat[i][j] != 0) cerr << "error: not full rank?" << FAIL; + if (j == i && mat[i][j] != 1) cerr << "error: not full rank?" << FAIL; + } + } + return res; +} + +vector> mul(const vector>& a, const vector>& b) { + int n = sz(a); + int m = sz(b[0]); + int x = sz(b); + if (sz(a[0]) != sz(b)) cerr << "error: wrong dimensions" << FAIL; + vector> res(n, vector(m)); + for (int i = 0; i < n; i++) { + for (int j = 0; j < m; j++) { + for (int k = 0; k < x; k++) { + res[i][j] += a[i][k] * b[k][j]; + res[i][j] %= mod; + } + } + } + return res; +} + +//this should just not crash... +void test_square() { + ll queries = 0; + hash_t hash = 0; + for (int tries = 0; tries < 1'000; tries++) { + int n = Random::integer(1, 30); + + vector> m(n); + for (auto& v : m) v = Random::integers(n, 0, mod); + mat = m; + gauss(n, mod); + + for (int i = 0; i < n; i++) { + for (int j = 0; j < n; j++) { + hash += mat[i][j]; + } + } + + queries += n; + } + cerr << "tested sqaures: " << queries << " (hash: " << hash << ")" << endl;; +} + +void stress_test_inv() { + ll queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + int n = Random::integer(1, 30); + + vector> m(n); + for (auto& v : m) v = Random::integers(n, 0, mod); + // m hopefully has full rank... 
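+		// (a uniformly random matrix over F_p is singular with probability only
+		// about 1/p, so with mod ~ 1e9 this practically never happens)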
+ + auto inv = inverseMat(m); + + auto prod = mul(m, inv); + + for (int i = 0; i < n; i++) { + for (int j = 0; j < n; j++) { + if (i == j && prod[i][j] != 1) cerr << "error: not inverted" << FAIL; + if (i != j && prod[i][j] != 0) cerr << "error: not inverted" << FAIL; + } + } + + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 250; +void performance_test() { + timer t; + + vector> m(N); + for (auto& v : m) v = Random::integers(N, 0, mod); + mat = m; + + t.start(); + gauss(N, mod); + t.stop(); + hash_t hash = 0; + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { + hash += mat[i][j]; + } + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + test_square(); + stress_test_inv(); + performance_test(); +} diff --git a/test/math/linearCongruence.cpp b/test/math/linearCongruence.cpp new file mode 100644 index 0000000..ba8eeac --- /dev/null +++ b/test/math/linearCongruence.cpp @@ -0,0 +1,53 @@ +#include "../util.h" +#include +#include +#include + +ll naive(ll a, ll b, ll m) { + for (ll x = 0; x < m; x++) { + if ((a * x) % m == b) return x; + } + return -1; +} + +void stress_test() { + ll work = 0; + ll positive = 0; + for (ll tries = 0; tries < 500'000; tries++) { + ll m = Random::integer(0, 1'000); + ll a = Random::integer(0, m); + ll b = Random::integer(0, m); + + ll got = solveLinearCongruence(a, b, m); + ll expected = naive(a, b, m); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << endl; + work++; + if (got >= 0) positive++; + } + cerr << "stress tested: " << work << " (" << positive << ")" << endl; +} + +constexpr int N = 500'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll m = Random::integer(0, 1'0000'000'000); + ll a = Random::integer(0, m); + ll b = Random::integer(0, m); + + t.start(); + hash += solveLinearCongruence(a, b, m); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/linearRecurence.cpp b/test/math/linearRecurence.cpp new file mode 100644 index 0000000..a5290e5 --- /dev/null +++ b/test/math/linearRecurence.cpp @@ -0,0 +1,54 @@ +#include "../util.h" +#include + +struct RandomRecurence { + vector f, c, cache; + RandomRecurence(int n) : f(Random::integers(n, 0, mod)), c(Random::integers(n, 0, mod)), cache(f) {} + + ll operator()(ll k){ + while (sz(cache) <= k) { + ll cur = 0; + for (ll i = 0; i < sz(c); i++) { + cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + } + cur %= mod; + cache.push_back(cur); + } + return cache[k]; + } +}; + +void stress_test() { + int queries = 0; + for (int i = 0; i < 10'000; i++) { + int n = Random::integer(1, 10); + RandomRecurence f(n); + for (int j = 0; j < 100; j++) { + ll k = Random::integer(0, 1000); + + ll got = kthTerm(f.f, f.c, k); + ll expected = f(k); + + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000; +void performance_test() { + timer t; + RandomRecurence f(N); + t.start(); + hash_t hash = kthTerm(f.f, f.c, 1e18); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << 
"ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/linearSieve.cpp b/test/math/linearSieve.cpp new file mode 100644 index 0000000..8ea822b --- /dev/null +++ b/test/math/linearSieve.cpp @@ -0,0 +1,71 @@ +#include "../util.h" +namespace expected { +#include +} +#pragma GCC diagnostic ignored "-Wunused-parameter" +#include + +void stress_test() { + expected::primeSieve(); + expected::primes.resize(primes.size()); + if (expected::primes != primes) cerr << "error: primes" << FAIL; + int queries = 0; + for (int i = 1; i < 1'000'000; i++) { + auto got = sieved[i]; + auto expected = naive(i); + if (got != expected) cerr << i << ", got: " << got << ", expected: " << expected << FAIL; + queries++; + } + for (int i = 0; i < 1'000'000; i++) { + ll x = Random::integer(2, N); + auto got = sieved[x]; + auto expected = naive(x); + if (got != expected) cerr << x << ", got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested queries: " << queries << endl; +} + +void test_tiny() { + if (mu( 3, 3, 1) != -1) cerr << "error: 1" << FAIL; + if (mu( 9, 3, 2) != 0) cerr << "error: 2" << FAIL; + if (mu(27, 3, 3) != 0) cerr << "error: 3" << FAIL; + + if (phi( 3, 3, 1) != 2) cerr << "error: 4" << FAIL; + if (phi( 9, 3, 2) != 6) cerr << "error: 5" << FAIL; + if (phi(27, 3, 3) != 18) cerr << "error: 6" << FAIL; + + if (div( 3, 3, 1) != 2) cerr << "error: 7" << FAIL; + if (div( 9, 3, 2) != 3) cerr << "error: 8" << FAIL; + if (div(27, 3, 3) != 4) cerr << "error: 9" << FAIL; + + if (divSum( 3, 3, 1) != 4) cerr << "error: 10" << FAIL; + if (divSum( 9, 3, 2) != 13) cerr << "error: 11" << FAIL; + if (divSum(27, 3, 3) != 40) cerr << "error: 12" << FAIL; + + if (square( 3, 3, 1) != 1) cerr << "error: 13" << FAIL; + if (square( 9, 3, 2) != 9) cerr << "error: 14" << FAIL; + if (square(27, 3, 3) != 9) cerr << "error: 15" << FAIL; + + if (squareFree( 3, 3, 1) != 3) cerr << "error: 13" << FAIL; + if (squareFree( 9, 3, 2) != 3) cerr << "error: 14" << FAIL; + if (squareFree(27, 3, 3) != 3) cerr << "error: 15" << FAIL; + cerr << "tested tiny" << endl; +} + +void performance_test() { + timer t; + t.start(); + sieve(); + hash_t hash = sz(primes); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + performance_test(); + stress_test(); + test_tiny(); +} + diff --git a/test/math/longestIncreasingSubsequence.cpp b/test/math/longestIncreasingSubsequence.cpp new file mode 100644 index 0000000..407dafe --- /dev/null +++ b/test/math/longestIncreasingSubsequence.cpp @@ -0,0 +1,76 @@ +#include "../util.h" +constexpr ll INF = LL::INF; +#include +#define lis unstrictLis +#define lower_bound upper_bound +#include +#undef lower_bound +#undef lis + +template +bool isLis(const vector& a, const vector& lis) { + for (int i = 1; i < sz(lis); i++) { + if (lis[i-1] >= lis[i]) return false; + if (a[lis[i-1]] > a[lis[i]]) return false; + if (STRICT && a[lis[i-1]] == a[lis[i]]) return false; + } + return true; +} + +template +vector naive(const vector& a) { + vector res; + for (ll i = 1; i < (1ll << sz(a)); i++) { + vector tmp; + for (ll j = 0; j < sz(a); j++) { + if (((i >> j) & 1) != 0) tmp.push_back(j); + } + if (sz(tmp) >= sz(res) && isLis(a, tmp)) res = tmp; + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 12); + auto a = Random::integers(n, -10, 10); + 
auto expected = naive(a); + auto got = lis(a); + if (got != expected) cerr << "error: strict" << FAIL; + queries += n; + } + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 12); + auto a = Random::integers(n, -10, 10); + auto expected = naive(a); + auto got = unstrictLis(a); + if (got != expected) cerr << "error: not strict" << FAIL; + queries += n; + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + auto a = Random::integers(N, -10'000, 10'000); + auto b = Random::integers(N, -10'000, 10'000); + sort(all(b)); + auto c = Random::integers(N, -10'000, 10'000); + sort(all(c)); + reverse(all(c)); + hash_t hash = 0; + t.start(); + hash += lis(a).size(); + hash += lis(b).size(); + hash += lis(c).size(); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/math/matrixPower.cpp b/test/math/matrixPower.cpp new file mode 100644 index 0000000..4dfb0a8 --- /dev/null +++ b/test/math/matrixPower.cpp @@ -0,0 +1,116 @@ +#include "../util.h" + +constexpr ll mod = 1'394'633'899; + +struct mat { + vector> m; + mat(int dim = 0, int diag = 1) : m(dim, vector(dim)) { + for (int i = 0; i < dim; i++) m[i][i] = diag; + } + mat(const vector c) : m(sz(c), vector(sz(c))) { + m[0] = c; + for (ll i = 1; i < sz(c); i++) { + m[i][i-1] = 1; + } + } + + mat operator*(const mat& o) const { + int dim = sz(m); + mat res(dim, 0); + for (int i = 0; i < dim; i++) { + for (int j = 0; j < dim; j++) { + for (int k = 0; k < dim; k++) { + res.m[i][j] += m[i][k] * o.m[k][j]; + res.m[i][j] %= mod; + } + } + } + return res; + } + + vector operator*(const vector& o) const { + int dim = sz(m); + vector res(dim); + for (int i = 0; i < dim; i++) { + for (int j = 0; j < dim; j++) { + res[i] += m[i][j] * o[j]; + res[i] %= mod; + } + } + return res; + } +}; + +#include + +struct RandomRecurence { + vector f, c, cache; + RandomRecurence(int n) : f(Random::integers(n, 0, mod)), c(Random::integers(n, 0, mod)), cache(f) {} + + ll operator()(ll k){ + while (sz(cache) <= k) { + ll cur = 0; + for (ll i = 0; i < sz(c); i++) { + cur += (c[i] * cache[sz(cache) - i - 1]) % mod; + } + cur %= mod; + cache.push_back(cur); + } + return cache[k]; + } +}; + +void stress_test() { + int queries = 0; + for (int i = 0; i < 10'000; i++) { + int n = Random::integer(1, 10); + RandomRecurence f(n); + precalc(mat(f.c)); + auto tmp = f.f; + reverse(all(tmp)); + + for (int j = 0; j < 100; j++) { + ll k = Random::integer(0, 1000); + + vector got = calc(k, tmp); + vector expected(sz(f.f)); + for (ll l = 0; l < n; l++) expected[n - 1 - l] = f(k + l); + + if (got != expected) cerr << "error" << FAIL; + queries++; + } + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 100; +constexpr int M = 500; +void performance_test() { + timer t; + RandomRecurence f(N); + auto tmp = f.f; + reverse(all(tmp)); + + t.start(); + precalc(mat(f.c)); + t.stop(); + if (t.time > 500) cerr << "too slow precalc: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms" << endl; + + t.reset(); + hash_t hash = 0; + for (int i = 0; i < M; i++) { + ll k = Random::integer(1e17,1e18); + t.start(); + hash += calc(k, tmp).back(); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; 
+} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/millerRabin.base32.cpp b/test/math/millerRabin.base32.cpp new file mode 100644 index 0000000..742d353 --- /dev/null +++ b/test/math/millerRabin.base32.cpp @@ -0,0 +1,137 @@ +#include "../util.h" +#define ll lll +#include +#undef ll + +//this is hacky... +#define bool }\ +constexpr auto bases64 = c20::to_array(ignore::bases32);\ +bool +namespace ignore { +#include +#undef bool + +bool naive(ll x) { + for (ll i = 2; i*i <= x; i++) { + if (x % i == 0) return false; + } + return x > 1; +} + +ll mul(const map& facts) { + ll res = 1; + for (auto [p, c] : facts) { + for (int i = 0; i < c; i++) res *= p; + } + if (abs(res) > (1ll << 62)) cerr << "invalid number: " << res << FAIL; + return res; +} + +void extra_tests() { + vector> test = { + {{-1, 1}, {1, 1}}, + {{-2, 1}, {1, 1}}, + {{-7, 1}, {1, 1}}, + {{-19812365821, 1}, {1, 1}}, + {}, // 1 + {{2, 1}}, + {{3, 1}}, + {{2, 2}}, + {{5, 1}}, + {{2, 1}, {3, 1}}, + {{2, 2}, {3, 1}}, + {{2, 1}, {3, 2}}, + {{2, 2}, {3, 2}}, + {{2, 62}}, + {{2, 18}, {5, 18}}, + {{352523, 1}, {352817, 1}}, + {{41, 1}, {71, 1}, {421, 1}, {811, 1}}, + {{11, 1}, {17, 1}, {31, 1}, {61, 1}, {73, 1}, {66361, 1}}, + {{500000003, 1}, {1999999973, 1}}, + {{65537, 2}}, + {{999665081, 1}, {999716071, 1}}, + {{550177, 1}, {1100353, 1}, {1650529, 1}}, + {{459397, 1}, {918793, 1}, {1378189, 1}}, + {{37, 1}, {109, 1}}, + {{31, 1}, {151, 1}}, + {{239, 1}, {1429, 1}}, + {{89, 1}, {1093, 1}}, + {{2, 3}, {15800133918749317, 1}}, + {{12251, 1}, {85751, 1}}, + {{3, 1}, {5, 3}, {131, 1}, {6855593, 1}}, + {{5, 1}, {1927962474784631, 1}}, + {{197279, 1}, {1775503, 1}}, + {{3716371, 1}, {14865481, 1}}, + {{3, 1}, {5, 1}, {3075593, 1}, {3075593, 1}}, + {{4880401, 1}, {9760801, 1}}, + {{2822159, 1}, {11288633, 1}}, + {{3290341, 1}, {6580681, 1}}, + {{611557, 1}, {1834669, 1}}, + {{9227, 1}, {894923, 1}, {968731, 1}}, + {{3, 4}, {13, 1}, {62633, 2}}, + {{2, 2}, {3, 1}, {5, 1}, {167, 2}, {299197, 2}}, + {{332721269, 1}, {560937673, 1}}, + {{30702523, 1}, {122810089, 1}}, + {{24786439, 1}, {123932191, 1}}, + {{382500329, 1}, {1530001313, 1}}, + {{2, 4}, {5, 4}, {13, 1}, {30839, 2}}, + {{3, 1}, {385417, 1}, {7985344259, 1}}, + {{2, 4}, {3, 1}, {5, 1}, {7, 2}, {61, 1}, {179, 2}, {1381, 2}}, + {{627838711, 1}, {1212379867, 1}}, + {{3, 5}, {5, 3}, {41, 2}, {157321, 2}}, + {{5, 2}, {13, 1}}, + {{3, 1}, {5, 5}}, + {{2, 1}, {73, 1}, {193, 1}}, + {{5, 2}, {13, 1}, {19, 1}, {73, 1}}, + {{2, 3}, {3, 1}, {407521, 1}}, + {{2, 1}, {3, 1}, {299210837, 1}}, + {{2, 8}, {3, 4}, {5, 2}, {7, 2}, {11, 1}, {13, 1}, {17, 1}, {19, 1}, {23, 1}, {29, 1}, {3137, 1}}, + }; + + timer t; + for (auto factors : test) { + ll x = mul(factors); + if (x >= 1ll << 32) continue; + t.start(); + auto got = isPrime(x); + t.stop(); + bool expected = sz(factors) == 1 && factors.begin()->second == 1; + if (got != expected) cerr << "error: " << x << FAIL; + } + if (t.time > 10) cerr << "too slow" << FAIL; + cerr << "stress tested: " << t.time << "ms" << endl; +} + +void stress_test() { + ll work = 0; + for (ll i = 0; i < 10'000; i++) { + ll x = Random::integer(1, 1ll << 32); + auto got = isPrime(x); + auto expected = naive(x); + if (got != expected) cerr << "error: " << x << FAIL; + work += sqrt(x); + } + cerr << "stress tested: " << work << endl; +} + +constexpr int N = 200'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll x = Random::integer(1ll << 31, 1ll << 32); + t.start(); + hash 
+= isPrime(x); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + extra_tests(); + stress_test(); + performance_test(); +} + diff --git a/test/math/millerRabin.cpp b/test/math/millerRabin.cpp new file mode 100644 index 0000000..fd98586 --- /dev/null +++ b/test/math/millerRabin.cpp @@ -0,0 +1,129 @@ +#include "../util.h" +#define ll lll +#include +#undef ll +#include + +bool naive(ll x) { + for (ll i = 2; i*i <= x; i++) { + if (x % i == 0) return false; + } + return x > 1; +} + +ll mul(const map& facts) { + ll res = 1; + for (auto [p, c] : facts) { + for (int i = 0; i < c; i++) res *= p; + } + if (abs(res) > (1ll << 62)) cerr << "invalid number: " << res << FAIL; + return res; +} + +void extra_tests() { + vector> test = { + {{-1, 1}, {1, 1}}, + {{-2, 1}, {1, 1}}, + {{-7, 1}, {1, 1}}, + {{-19812365821, 1}, {1, 1}}, + {}, // 1 + {{2, 1}}, + {{3, 1}}, + {{2, 2}}, + {{5, 1}}, + {{2, 1}, {3, 1}}, + {{2, 2}, {3, 1}}, + {{2, 1}, {3, 2}}, + {{2, 2}, {3, 2}}, + {{2, 62}}, + {{2, 18}, {5, 18}}, + {{352523, 1}, {352817, 1}}, + {{41, 1}, {71, 1}, {421, 1}, {811, 1}}, + {{11, 1}, {17, 1}, {31, 1}, {61, 1}, {73, 1}, {66361, 1}}, + {{500000003, 1}, {1999999973, 1}}, + {{65537, 2}}, + {{999665081, 1}, {999716071, 1}}, + {{550177, 1}, {1100353, 1}, {1650529, 1}}, + {{459397, 1}, {918793, 1}, {1378189, 1}}, + {{37, 1}, {109, 1}}, + {{31, 1}, {151, 1}}, + {{239, 1}, {1429, 1}}, + {{89, 1}, {1093, 1}}, + {{2, 3}, {15800133918749317, 1}}, + {{12251, 1}, {85751, 1}}, + {{3, 1}, {5, 3}, {131, 1}, {6855593, 1}}, + {{5, 1}, {1927962474784631, 1}}, + {{197279, 1}, {1775503, 1}}, + {{3716371, 1}, {14865481, 1}}, + {{3, 1}, {5, 1}, {3075593, 1}, {3075593, 1}}, + {{4880401, 1}, {9760801, 1}}, + {{2822159, 1}, {11288633, 1}}, + {{3290341, 1}, {6580681, 1}}, + {{611557, 1}, {1834669, 1}}, + {{9227, 1}, {894923, 1}, {968731, 1}}, + {{3, 4}, {13, 1}, {62633, 2}}, + {{2, 2}, {3, 1}, {5, 1}, {167, 2}, {299197, 2}}, + {{332721269, 1}, {560937673, 1}}, + {{30702523, 1}, {122810089, 1}}, + {{24786439, 1}, {123932191, 1}}, + {{382500329, 1}, {1530001313, 1}}, + {{2, 4}, {5, 4}, {13, 1}, {30839, 2}}, + {{3, 1}, {385417, 1}, {7985344259, 1}}, + {{2, 4}, {3, 1}, {5, 1}, {7, 2}, {61, 1}, {179, 2}, {1381, 2}}, + {{627838711, 1}, {1212379867, 1}}, + {{3, 5}, {5, 3}, {41, 2}, {157321, 2}}, + {{5, 2}, {13, 1}}, + {{3, 1}, {5, 5}}, + {{2, 1}, {73, 1}, {193, 1}}, + {{5, 2}, {13, 1}, {19, 1}, {73, 1}}, + {{2, 3}, {3, 1}, {407521, 1}}, + {{2, 1}, {3, 1}, {299210837, 1}}, + {{2, 8}, {3, 4}, {5, 2}, {7, 2}, {11, 1}, {13, 1}, {17, 1}, {19, 1}, {23, 1}, {29, 1}, {3137, 1}}, + }; + + timer t; + for (auto factors : test) { + ll x = mul(factors); + t.start(); + auto got = isPrime(x); + t.stop(); + bool expected = sz(factors) == 1 && factors.begin()->second == 1; + if (got != expected) cerr << "error: " << x << FAIL; + } + if (t.time > 10) cerr << "too slow" << FAIL; + cerr << "stress tested: " << t.time << "ms" << endl; +} + +void stress_test() { + ll work = 0; + for (ll i = 0; i < 10'000; i++) { + ll x = Random::integer(1, 1'000'000'000'000); + auto got = isPrime(x); + auto expected = naive(x); + if (got != expected) cerr << "error: " << x << FAIL; + work += sqrt(x); + } + cerr << "stress tested: " << work << endl; +} + +constexpr int N = 200'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll x = Random::integer(1e18 / 2, 1e18); + t.start(); + 
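/* timed call: Miller-Rabin primality check on a random value in [1e18/2, 1e18) */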
hash += isPrime(x); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + extra_tests(); + stress_test(); + performance_test(); +} + diff --git a/test/math/modExp.cpp b/test/math/modExp.cpp new file mode 100644 index 0000000..ebb38eb --- /dev/null +++ b/test/math/modExp.cpp @@ -0,0 +1,42 @@ +#include "../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int a = Random::integer(1, 100); + int n = Random::integer(2, 100); + ll expected = 1; + ll k = 0; + do { + auto got = powMod(a, k, n); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + k++; + expected = (expected * a) % n; + } while (k < 100); + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll a = Random::integer(0, 1'000'000'000); + ll b = Random::integer(0, 1'000'000'000); + ll n = Random::integer(2, 1'000'000'000); + t.start(); + hash += powMod(a, b, n); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/modMulIterativ.cpp b/test/math/modMulIterativ.cpp new file mode 100644 index 0000000..4f794c5 --- /dev/null +++ b/test/math/modMulIterativ.cpp @@ -0,0 +1,57 @@ +#include "../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int a = Random::integer(1, 100); + int n = Random::integer(2, 100); + ll expected = 0; + ll k = 0; + do { + auto got = mulMod(a, k, n); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + k++; + expected = (expected + a) % n; + } while (k < 100); + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +void stress_test_large() { + ll queries = 0; + for (ll i = 0; i < 1000'000; i++) { + ll a = Random::integer(0, 1'000'000'000'000'000'000); + ll b = Random::integer(0, 1'000'000'000'000'000'000); + ll n = Random::integer(2, 1'000'000'000'000'000'000); + ll expected = (lll)a * b % n; + auto got = mulMod(a, b, n); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 500'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll a = Random::integer(0, 1'000'000'000'000'000'000); + ll b = Random::integer(0, 1'000'000'000'000'000'000); + ll n = Random::integer(2, 1'000'000'000'000'000'000); + t.start(); + hash += mulMod(a, b, n); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + stress_test_large(); + performance_test(); +} + diff --git a/test/math/modPowIterativ.cpp b/test/math/modPowIterativ.cpp new file mode 100644 index 0000000..2cf0eb4 --- /dev/null +++ b/test/math/modPowIterativ.cpp @@ -0,0 +1,42 @@ +#include "../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int a = Random::integer(1, 100); + int n = Random::integer(2, 100); + ll expected = 1; + ll k = 0; + do { + 
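/* compare powMod(a, k, n) against the running product maintained by repeated multiplication mod n */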
auto got = powMod(a, k, n); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + k++; + expected = (expected * a) % n; + } while (k < 100); + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll a = Random::integer(0, 1'000'000'000); + ll b = Random::integer(0, 1'000'000'000); + ll n = Random::integer(2, 1'000'000'000); + t.start(); + hash += powMod(a, b, n); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/multInv.cpp b/test/math/multInv.cpp new file mode 100644 index 0000000..93763c5 --- /dev/null +++ b/test/math/multInv.cpp @@ -0,0 +1,40 @@ +#include "../util.h" +#include +#include + +void stress_test() { + ll queries = 0; + for (int i = 0; i < 10'000'000; i++) { + ll n = Random::integer(2, 1'000'000'000); + ll x = 0; + do { + x = Random::integer(0, n); + } while (gcd(x, n) != 1); + ll y = multInv(x, n); + ll got = (x*y) % n; + if (got != 1) cerr << "got: " << got << ", expected: 1" << FAIL; + queries++; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll a = Random::integer(0, 1'000'000'000); + ll b = Random::integer(2, 1'000'000'000); + t.start(); + hash += multInv(a, b); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/permIndex.cpp b/test/math/permIndex.cpp new file mode 100644 index 0000000..61d34c8 --- /dev/null +++ b/test/math/permIndex.cpp @@ -0,0 +1,39 @@ +#include "../util.h" +#include +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 100); + vector cur(n); + iota(all(cur), 0); + ll expected = 0; + do { + auto got = permIndex(cur); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + expected++; + } while (expected < 100 && next_permutation(all(cur))); + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 500'000; +void performance_test() { + timer t; + vector cur(N); + iota(all(cur), 0); + reverse(cur.end() - 10, cur.end()); + t.start(); + auto hash = permIndex(cur); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/piLegendre.cpp b/test/math/piLegendre.cpp new file mode 100644 index 0000000..c3513bf --- /dev/null +++ b/test/math/piLegendre.cpp @@ -0,0 +1,40 @@ +#include "../util.h" +#include +namespace legendre { + #include +} +namespace lehmer { + #include +} + +void stress_test() { + int queries = 0; + for (int i = 0; i < 1'000; i++) { + ll x = Random::integer(0, 1'000'000'000); + auto got = legendre::pi(x); + auto expected = lehmer::pi(x); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested random queries: " << queries << endl; +} + +void 
performance_test() { + timer t; + hash_t hash = 0; + for (int i = 0; i < 1; i++) { + ll x = Random::integer(0, 1000'000'000'000); + t.start(); + hash += legendre::pi(x); + t.stop(); + } + if (t.time > 1500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + lehmer::init(); + performance_test(); + stress_test(); +} + diff --git a/test/math/piLehmer.cpp b/test/math/piLehmer.cpp new file mode 100644 index 0000000..d84466f --- /dev/null +++ b/test/math/piLehmer.cpp @@ -0,0 +1,42 @@ +#include "../util.h" +#include +namespace legendre { + #include +} +namespace lehmer { + #include +} + +void stress_test() { + int queries = 0; + for (int i = 0; i < 1'000; i++) { + ll x = Random::integer(0, 1'000'000'000); + auto got = lehmer::pi(x); + auto expected = legendre::pi(x); + if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + cerr << "tested random queries: " << queries << endl; +} + +void performance_test() { + timer t; + hash_t hash = 0; + t.start(); + lehmer::init(); + t.stop(); + for (int i = 0; i < 1; i++) { + ll x = Random::integer(0, 1000'000'000'000); + t.start(); + hash += lehmer::pi(x); + t.stop(); + } + if (t.time > 1500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + performance_test(); + stress_test(); +} + diff --git a/test/math/primeSieve.cpp b/test/math/primeSieve.cpp new file mode 100644 index 0000000..78a50d2 --- /dev/null +++ b/test/math/primeSieve.cpp @@ -0,0 +1,47 @@ +#include "../util.h" +#include + +bool naive(ll x) { + for (ll i = 2; i*i <= x; i++) { + if (x % i == 0) return false; + } + return x > 1; +} + +void stress_test() { + int queries = 0; + vector found; + for (int i = -5; i < 1'000'000; i++) { + auto got = isPrime(i); + auto expected = naive(i); + if (got != expected) cerr << "error: " << i << FAIL; + if (got) found.push_back(i); + queries++; + } + primes.resize(sz(found)); + if (primes != found) cerr << "error: primes" << FAIL; + for (int i = 0; i < 1'000'000; i++) { + ll x = Random::integer(2, N); + auto got = isPrime(x); + auto expected = naive(x); + if (got != expected) cerr << "error: " << x << FAIL; + queries++; + } + cerr << "tested queries: " << queries << endl; +} + +void performance_test() { + timer t; + t.start(); + primeSieve(); + hash_t hash = sz(primes); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + performance_test(); + stress_test(); +} + diff --git a/test/math/primitiveRoot.cpp b/test/math/primitiveRoot.cpp new file mode 100644 index 0000000..cd0b388 --- /dev/null +++ b/test/math/primitiveRoot.cpp @@ -0,0 +1,82 @@ +#include "../util.h" +#define ll lll +#include +#undef ll +#include +#include + +ll phi(ll pk, ll p, ll /*k*/) {return pk - pk / p;} +ll phi(ll n) { // O(sqrt(n)) + ll res = 1; + for (ll p = 2; p * p <= n; p++) { + if (n % p == 0) { + ll pk = 1; + ll k = 0; + do { + n /= p; + pk *= p; + k++; + } while (n % p == 0); + res *= phi(pk, p, k); + }} + if (n > 1) res *= phi(n, n, 1); + return res; +} + +#include + +bool naiveIsPrimitive(ll g, ll n) { + if (gcd(g, n) != 1) return false; + vector seen(n); + ll c = g; + for (ll i = 0; i < n; i++) { + seen[c] = true; + c = (c * g) % n; + } + ll res = 0; + for (bool x : seen) if (x) res++; + return res == phi(n); +} + +void stress_test() { + 
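/* exercise findPrimitive on moduli of the form p^k and 2*p^k and verify each result with naiveIsPrimitive */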
int queries = 0; + for (int tries = 0; tries < 20'000; tries++) { + ll a = Random::integer(1, 3); + ll p = Random::prime(0, 1000); + ll k = p == 2 ? 1 : Random::integer(1, log(100'000) / log(p) + 1); + + ll x = a; + for (int i = 0; i < k; i++) x *= p; + + ll got = findPrimitive(x); + + if (got < 0 || got >= x) cerr << "error: out of range" << FAIL; + if (!naiveIsPrimitive(got, x)) cerr << "error: wrong" << got << " " << x << FAIL; + queries++; + } + cerr << "tested random queries: " << queries << endl; +} + +void stress_test2() { + int queries = 0; + for (int x = 2; x < 5'000; x++) { + map facts; + factor(x, facts); + if (x % 2 == 0) facts.erase(facts.find(2)); + bool expected = sz(facts) == 1; + if (x % 4 == 0) expected = false; + if (x == 2 || x == 4) expected = true; + + bool got = findPrimitive(x) >= 0; + + if (got != expected) cerr << "error" << FAIL; + queries++; + } + cerr << "tested random queries: " << queries << endl; +} + +int main() { + stress_test(); + stress_test2(); +} + diff --git a/test/math/rho.cpp b/test/math/rho.cpp new file mode 100644 index 0000000..5e4792a --- /dev/null +++ b/test/math/rho.cpp @@ -0,0 +1,117 @@ +#include "../util.h" +#define ll lll +#include +#undef ll +#include +#include + +map factor(ll n) { + map facts; + factor(n, facts); + return facts; +} + +ll mul(const map& facts) { + ll res = 1; + for (auto [p, c] : facts) { + for (int i = 0; i < c; i++) res *= p; + } + if (res < 1 || res > (1ll << 62)) cerr << "invalid number: " << res << FAIL; + return res; +} + +void stress_test() { + vector> test = { + {}, // 1 + {{2, 1}}, + {{3, 1}}, + {{2, 2}}, + {{5, 1}}, + {{2, 1}, {3, 1}}, + {{2, 2}, {3, 1}}, + {{2, 1}, {3, 2}}, + {{2, 2}, {3, 2}}, + {{2, 62}}, + {{2, 18}, {5, 18}}, + {{352523, 1}, {352817, 1}}, + {{41, 1}, {71, 1}, {421, 1}, {811, 1}}, + {{11, 1}, {17, 1}, {31, 1}, {61, 1}, {73, 1}, {66361, 1}}, + {{500000003, 1}, {1999999973, 1}}, + {{65537, 2}}, + {{999665081, 1}, {999716071, 1}}, + {{550177, 1}, {1100353, 1}, {1650529, 1}}, + {{459397, 1}, {918793, 1}, {1378189, 1}}, + {{37, 1}, {109, 1}}, + {{31, 1}, {151, 1}}, + {{239, 1}, {1429, 1}}, + {{89, 1}, {1093, 1}}, + {{2, 3}, {15800133918749317, 1}}, + {{12251, 1}, {85751, 1}}, + {{3, 1}, {5, 3}, {131, 1}, {6855593, 1}}, + {{5, 1}, {1927962474784631, 1}}, + {{197279, 1}, {1775503, 1}}, + {{3716371, 1}, {14865481, 1}}, + {{3, 1}, {5, 1}, {3075593, 1}, {3075593, 1}}, + {{4880401, 1}, {9760801, 1}}, + {{2822159, 1}, {11288633, 1}}, + {{3290341, 1}, {6580681, 1}}, + {{611557, 1}, {1834669, 1}}, + {{9227, 1}, {894923, 1}, {968731, 1}}, + {{3, 4}, {13, 1}, {62633, 2}}, + {{2, 2}, {3, 1}, {5, 1}, {167, 2}, {299197, 2}}, + {{332721269, 1}, {560937673, 1}}, + {{30702523, 1}, {122810089, 1}}, + {{24786439, 1}, {123932191, 1}}, + {{382500329, 1}, {1530001313, 1}}, + {{2, 4}, {5, 4}, {13, 1}, {30839, 2}}, + {{3, 1}, {385417, 1}, {7985344259, 1}}, + {{2, 4}, {3, 1}, {5, 1}, {7, 2}, {61, 1}, {179, 2}, {1381, 2}}, + {{627838711, 1}, {1212379867, 1}}, + {{3, 5}, {5, 3}, {41, 2}, {157321, 2}}, + {{5, 2}, {13, 1}}, + {{3, 1}, {5, 5}}, + {{2, 1}, {73, 1}, {193, 1}}, + {{5, 2}, {13, 1}, {19, 1}, {73, 1}}, + {{2, 3}, {3, 1}, {407521, 1}}, + {{2, 1}, {3, 1}, {299210837, 1}}, + {{2, 8}, {3, 4}, {5, 2}, {7, 2}, {11, 1}, {13, 1}, {17, 1}, {19, 1}, {23, 1}, {29, 1}, {3137, 1}}, + }; + + timer t; + for (auto expected : test) { + ll x = mul(expected); + t.start(); + auto got = factor(x); + t.stop(); + if (got != expected) { + cerr << "number: " << x << endl; + cerr << "got:" << endl; + for (auto [p, c] : got) cerr << p << "^" 
<< c << endl; + cerr << "expected" << endl; + for (auto [p, c] : expected) cerr << p << "^" << c << endl; + cerr << FAIL; + } + } + if (t.time > 100) cerr << "too slow" << FAIL; + cerr << "stress tested: " << t.time << "ms" << endl; +} + +constexpr int N = 2'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll x = Random::integer(1e18 / 2, 1e18); + t.start(); + hash += factor(x).size(); + t.stop(); + } + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/shortModInv.cpp b/test/math/shortModInv.cpp new file mode 100644 index 0000000..26960bf --- /dev/null +++ b/test/math/shortModInv.cpp @@ -0,0 +1,39 @@ +#include "../util.h" +#include + +void stress_test() { + ll queries = 0; + for (int i = 0; i < 10'000'000; i++) { + ll n = Random::integer(2, 1'000'000'000); + ll x = 0; + do { + x = Random::integer(0, n); + } while (gcd(x, n) != 1); + ll y = multInv(x, n); + ll got = (x*y) % n; + if (got != 1) cerr << "got: " << got << ", expected: 1" << FAIL; + queries++; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll a = Random::integer(0, 1'000'000'000); + ll b = Random::integer(2, 1'000'000'000); + t.start(); + hash += multInv(a, b); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/simpson.cpp b/test/math/simpson.cpp new file mode 100644 index 0000000..d7cdba3 --- /dev/null +++ b/test/math/simpson.cpp @@ -0,0 +1,63 @@ +#include "../util.h" +std::function f; +constexpr double EPS = 1e-9; +#include + +struct RandomPolynom { + vector polynom; + RandomPolynom(int deg) : polynom(deg) { + for (auto& x : polynom) x = Random::real(-100, 100); + } + double operator()(double x) const { + double res = 0; + double xx = 1; + for (double y : polynom ) { + res += xx * y; + xx *= x; + } + return res; + } + double area(double a, double b) const { + double res = 0; + double aa = a; + double bb = b; + ll d = 1; + for (double y : polynom) { + res += bb / d * y; + res -= aa / d * y; + aa *= a; + bb *= b; + d++; + } + return res; + } +}; + +void stress_test() { + timer t; + ll queries = 0; + for (int tries = 0; tries < 1'000; tries++) { + ll n = Random::integer(0, 6); + RandomPolynom poly(n); + f = poly; + for (ll i = 0; i < 200; i++) { + double l = Random::real(-20, 20); + double r = Random::real(-20, 20); + if (l > r) swap(l, r); + + t.start(); + double got = integrate(l, r); + t.stop(); + double expected = poly.area(l, r); + if (float_error(got, expected) > 1e-6) cerr << fixed << setprecision(20) << "got: " << got << ", expected: " << expected << FAIL; + queries++; + } + } + if (t.time > 5000) cerr << "too slow: " << t.time << FAIL; + cerr << "tested random queries: " << queries << " (" << t.time << "ms)" << endl; +} + +int main() { + stress_test(); +} + diff --git a/test/math/sqrtModCipolla.cpp b/test/math/sqrtModCipolla.cpp new file mode 100644 index 0000000..26d975b --- /dev/null +++ b/test/math/sqrtModCipolla.cpp @@ -0,0 +1,48 @@ +#include "../util.h" +#include +#include +mt19937 rng(123456789); +#include + +void 
stress_test(ll range) { + ll work = 0; + for (ll i = 0; i < 10'000; i++) { + ll p = Random::prime(range); + for (ll j = 0; j < 100; j++) { + ll x = Random::integer(0, p); + if (legendre(x, p) < 0) continue; + + ll got = sqrtMod(x, p); + if (got < 0 || got >= p) cerr << "error: out of range" << FAIL; + if ((got * got) % p != x) cerr << "error: not root" << FAIL; + work++; + } + } + cerr << "stress tested: " << work << endl; +} + +constexpr int N = 200'000; +constexpr ll mod = 1'394'633'899; +void performance_test() { + timer t; + hash_t hash = 0; + for (int operations = 0; operations < N; operations++) { + ll x; + do { + x = Random::integer(0, mod); + } while (legendre(x, mod) >= 0); + t.start(); + hash += sqrtMod(x, mod); + t.stop(); + } + if (t.time > 750) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + + +int main() { + stress_test(1'000); + stress_test(1'000'000'000); + performance_test(); +} + diff --git a/test/math/transforms/andTransform.cpp b/test/math/transforms/andTransform.cpp new file mode 100644 index 0000000..fa029f6 --- /dev/null +++ b/test/math/transforms/andTransform.cpp @@ -0,0 +1,38 @@ +#include "../../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = 1ll << Random::integer(0, 10); + auto expected = Random::integers(n, -1000, 1000); + auto got = expected; + fft(got, false); + fft(got, true); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 23; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + fft(a, true); + fft(b, false); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * a[i]; + for (ll i = 0; i < N; i++) hash += i * b[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/bitwiseTransforms.cpp b/test/math/transforms/bitwiseTransforms.cpp new file mode 100644 index 0000000..132740c --- /dev/null +++ b/test/math/transforms/bitwiseTransforms.cpp @@ -0,0 +1,38 @@ +#include "../../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = 1ll << Random::integer(0, 10); + auto expected = Random::integers(n, -1000, 1000); + auto got = expected; + bitwiseConv(got, false); + bitwiseConv(got, true); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 23; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + bitwiseConv(a, true); + bitwiseConv(b, false); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * a[i]; + for (ll i = 0; i < N; i++) hash += i * b[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/fft.cpp b/test/math/transforms/fft.cpp new file mode 100644 index 0000000..858676b --- /dev/null +++ b/test/math/transforms/fft.cpp @@ -0,0 +1,51 @@ +#include "../../util.h" +#include + +vector to_cplx(const 
vector& in) { + vector res(sz(in)); + for (int i = 0; i < sz(in); i++) res[i] = in[i]; + return res; +} + +vector from_cplx(const vector& in) { + vector res(sz(in)); + for (int i = 0; i < sz(in); i++) res[i] = llround(real(in[i])); + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = 1ll << Random::integer(0, 10); + auto expected = Random::integers(n, -1000, 1000); + vector tmp = to_cplx(expected); + fft(tmp, false); + fft(tmp, true); + auto got = from_cplx(tmp); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 21; +void performance_test() { + timer t; + auto a = to_cplx(Random::integers(N, -1000, 1000)); + auto b = to_cplx(Random::integers(N, -1000, 1000)); + t.start(); + fft(a, true); + fft(b, false); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * llround(real(a[i])); + for (ll i = 0; i < N; i++) hash += i * llround(real(b[i])); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/fftMul.cpp b/test/math/transforms/fftMul.cpp new file mode 100644 index 0000000..5933864 --- /dev/null +++ b/test/math/transforms/fftMul.cpp @@ -0,0 +1,62 @@ +#include "../../util.h" +#include +#include +#pragma GCC diagnostic ignored "-Wnarrowing" +#include + +vector from_cplx(const vector& in) { + vector res(sz(in)); + for (int i = 0; i < sz(in); i++) res[i] = llround(real(in[i])); + return res; +} + +vector naive(const vector& a, const vector& b) { + vector res; + for (ll i = 1;; i *= 2) { + if (sz(a) + sz(b) <= i) { + res.resize(i, 0); + break; + } + } + for (int i = 0; i < sz(a); i++) { + for (int j = 0; j < sz(b); j++) { + res[i+j] += a[i] * b[j]; + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 100); + int m = Random::integer(1, 100); + auto a = Random::integers(n, -1000, 1000); + auto b = Random::integers(m, -1000, 1000); + auto expected = naive(a, b); + auto got = from_cplx(mul(a, b)); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 20; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + auto got = from_cplx(mul(a, b)); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * got[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/multiplyBitwise.cpp b/test/math/transforms/multiplyBitwise.cpp new file mode 100644 index 0000000..bc73290 --- /dev/null +++ b/test/math/transforms/multiplyBitwise.cpp @@ -0,0 +1,55 @@ +#include "../../util.h" +#include +#include +#include + +vector naive(const vector& a, const vector& b) { + vector res; + for (ll i = 1;; i *= 2) { + if (sz(a) <= i && sz(b) <= i) { + res.resize(i, 0); + break; + } + } + for (int i = 0; i < sz(a); i++) { + for (int j = 0; j < sz(b); j++) { + res[i&j] += a[i] * b[j]; + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 100); + int m = 
Random::integer(1, 100); + auto a = Random::integers(n, -1000, 1000); + auto b = Random::integers(m, -1000, 1000); + auto expected = naive(a, b); + auto got = mul(a, b); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 22; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + auto got = mul(a, b); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * got[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/multiplyFFT.cpp b/test/math/transforms/multiplyFFT.cpp new file mode 100644 index 0000000..782be1b --- /dev/null +++ b/test/math/transforms/multiplyFFT.cpp @@ -0,0 +1,55 @@ +#include "../../util.h" +#include +#include +#include + +vector naive(const vector& a, const vector& b) { + vector res; + for (ll i = 1;; i *= 2) { + if (sz(a) + sz(b) <= i) { + res.resize(i, 0); + break; + } + } + for (int i = 0; i < sz(a); i++) { + for (int j = 0; j < sz(b); j++) { + res[i+j] += a[i] * b[j]; + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 100); + int m = Random::integer(1, 100); + auto a = Random::integers(n, -1000, 1000); + auto b = Random::integers(m, -1000, 1000); + auto expected = naive(a, b); + auto got = mul(a, b); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 19; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + auto got = mul(a, b); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * got[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/multiplyNTT.cpp b/test/math/transforms/multiplyNTT.cpp new file mode 100644 index 0000000..70fc137 --- /dev/null +++ b/test/math/transforms/multiplyNTT.cpp @@ -0,0 +1,56 @@ +#include "../../util.h" +#include +#include +#include + +vector naive(const vector& a, const vector& b) { + vector res; + for (ll i = 1;; i *= 2) { + if (sz(a) + sz(b) <= i) { + res.resize(i, 0); + break; + } + } + for (int i = 0; i < sz(a); i++) { + for (int j = 0; j < sz(b); j++) { + res[i+j] += a[i] * b[j]; + res[i+j] %= mod; + } + } + return res; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = Random::integer(1, 100); + int m = Random::integer(1, 100); + auto a = Random::integers(n, 0, mod); + auto b = Random::integers(m, 0, mod); + auto expected = naive(a, b); + auto got = mul(a, b); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 20; +void performance_test() { + timer t; + vector a = Random::integers(N, 0, mod); + vector b = Random::integers(N, 0, mod); + t.start(); + auto got = mul(a, b); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * got[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " 
<< t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/ntt.cpp b/test/math/transforms/ntt.cpp new file mode 100644 index 0000000..cd32073 --- /dev/null +++ b/test/math/transforms/ntt.cpp @@ -0,0 +1,39 @@ +#include "../../util.h" +#include +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = 1ll << Random::integer(0, 10); + auto expected = Random::integers(n, 0, mod); + auto got = expected; + ntt(got, false); + ntt(got, true); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 22; +void performance_test() { + timer t; + vector a = Random::integers(N, 0, mod); + vector b = Random::integers(N, 0, mod); + t.start(); + ntt(a, true); + ntt(b, false); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * a[i]; + for (ll i = 0; i < N; i++) hash += i * b[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/orTransform.cpp b/test/math/transforms/orTransform.cpp new file mode 100644 index 0000000..0ec9155 --- /dev/null +++ b/test/math/transforms/orTransform.cpp @@ -0,0 +1,38 @@ +#include "../../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = 1ll << Random::integer(0, 10); + auto expected = Random::integers(n, -1000, 1000); + auto got = expected; + fft(got, false); + fft(got, true); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 23; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + fft(a, true); + fft(b, false); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * a[i]; + for (ll i = 0; i < N; i++) hash += i * b[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/math/transforms/xorTransform.cpp b/test/math/transforms/xorTransform.cpp new file mode 100644 index 0000000..17b0f6f --- /dev/null +++ b/test/math/transforms/xorTransform.cpp @@ -0,0 +1,38 @@ +#include "../../util.h" +#include + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 10'000; i++) { + int n = 1ll << Random::integer(0, 10); + auto expected = Random::integers(n, -1000, 1000); + auto got = expected; + fft(got, false); + fft(got, true); + if (got != expected) cerr << "error" << FAIL; + queries += n; + } + cerr << "tested queries: " << queries << endl; +} + +constexpr int N = 1ll << 23; +void performance_test() { + timer t; + vector a = Random::integers(N, -1000, 1000); + vector b = Random::integers(N, -1000, 1000); + t.start(); + fft(a, true); + fft(b, false); + t.stop(); + hash_t hash = 0; + for (ll i = 0; i < N; i++) hash += i * a[i]; + for (ll i = 0; i < N; i++) hash += i * b[i]; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/other/compiletime.cpp 
b/test/other/compiletime.cpp new file mode 100644 index 0000000..591669d --- /dev/null +++ b/test/other/compiletime.cpp @@ -0,0 +1,2 @@ +#include +int main() {} \ No newline at end of file diff --git a/test/other/divideAndConquer.cpp b/test/other/divideAndConquer.cpp new file mode 100644 index 0000000..a6fda9d --- /dev/null +++ b/test/other/divideAndConquer.cpp @@ -0,0 +1,103 @@ +#include "../util.h" +constexpr ll inf = LL::INF; +#include + +vector> gen(int n) { + vector> res(n, vector(n)); + ll mi = 0; + for (ll a = n-1; a >= 0; a--) { + for (ll c = n-1; c >= a; c--) { + for (ll b = a; b <= c; b++) { + for (ll d = c; d < n; d++) { + res[a][c] = min(res[a][c], res[a][d] + res[b][c] - res[b][d]); + } + } + res[a][c] -= Random::integer(0, 1000); + mi = min(mi, res[a][c]); + } + } + for (auto& v : res) for (auto& x : v) x -= mi; + + for (ll a = 0; a < n; a++) { + for (ll b = a; b < n; b++) { + for (ll c = b; c < n; c++) { + for (ll d = c; d < n; d++) { + if (res[a][d] < 0 || res[a][d] + res[b][c] < res[a][c] + res[b][d]) { + cerr << "invalid C array!" << FAIL; + } + } + } + } + } + return res; +} + +vector> genQuick(int n) { + vector> res(n, vector(n)); + for (ll a = n-1; a >= 0; a--) { + for (ll c = n-1; c >= a; c--) { + res[a][c] = (c-a) * (c - a) + Random::integer(0, 2); + } + } + return res; +} + +/*ll naive(int n, int m) { + vector> state(m+1, vector(n+1, inf)); + state[0][0] = 0; + for (int i = 1; i <= m; i++) { + for (int j = 1; j <= n; j++) { + for (int k = 1; k <= j; k++) { + state[i][j] = min(state[i][j], state[i-1][k-1] + C[k-1][j-1]); + } + } + } + return state[m][n]; +}*/ + +vector naive(int n) { + vector> state(n+1, vector(n+1, inf)); + state[0][0] = 0; + vector res(n+1, inf); + for (int i = 1; i <= n; i++) { + for (int j = 1; j <= n; j++) { + for (int k = 1; k <= j; k++) { + state[i][j] = min(state[i][j], state[i-1][k-1] + C[k-1][j-1]); + } + } + res[i] = state[i][n]; + } + return res; +} + +void stress_test() { + ll tests = 0; + for (ll i = 0; i < 1000; i++) { + auto n = Random::integer(10, 20); + C = gen(n); + auto expected = naive(n); + for (ll m = 1; m <= n; m++) { + auto got = calc(n, m); + if (got != expected[m]) cerr << "got: " << got << ", expected: " << expected[m] << FAIL; + tests++; + } + } + cerr << "tested random queries: " << tests << endl; +} + +constexpr int N = 10'000; +void performance_test() { + timer t; + C = genQuick(N); + t.start(); + auto hash = calc(N, 32); + t.stop(); + if (t.time > 50) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/other/fastIO.cpp b/test/other/fastIO.cpp new file mode 100644 index 0000000..765ddba --- /dev/null +++ b/test/other/fastIO.cpp @@ -0,0 +1,32 @@ +#include "../util.h" +#include + +int main() { + if (freopen("other/fastIO.in", "r", stdin) == nullptr) cerr << "fastIO.in not found" << FAIL; + vector got(5); + vector expected = {4, 7, 3, 6, 9}; + for (int& x : got) fastscan(x); + if (got != expected) cerr << "failed fastscan" << FAIL; + + if (freopen("other/fastIO.out", "w", stdout) == nullptr) cerr << "fastIO.out not writebale" << FAIL; + fastprint(0); + putchar('\n'); + fastprint(-1); + putchar(' '); + fastprint(-8321648); + putchar(' '); + fastprint(1); + putchar(' '); + fastprint(42387); + putchar('\n'); + fclose(stdout); + + stringstream buffer; + { + ifstream tmp("other/fastIO.out"); + buffer << tmp.rdbuf(); + } + if (buffer.str() != "0\n-1 -8321648 1 42387\n") cerr << 
"failed fastprint" << FAIL; + cerr << "done" << endl; +} + diff --git a/test/other/fastIO.in b/test/other/fastIO.in new file mode 100644 index 0000000..45594a4 --- /dev/null +++ b/test/other/fastIO.in @@ -0,0 +1,2 @@ +4 7 +3 6 9 \ No newline at end of file diff --git a/test/other/josephus2.cpp b/test/other/josephus2.cpp new file mode 100644 index 0000000..d28fe0d --- /dev/null +++ b/test/other/josephus2.cpp @@ -0,0 +1,42 @@ +#include "../util.h" +#include + +template +ll naive(ll n, ll k) { + vector state(n); + iota(all(state), O); + for (ll i = k-1; state.size() > 1; i = (i + k - 1) % sz(state)) { + state.erase(state.begin() + i); + } + return state[0]; +} + +void stress_test() { + ll tests = 0; + for (ll i = 1; i < 2'000; i++) { + auto got = rotateLeft(i); + auto expected = naive<1>(i, 2); + if (got != expected) cerr << "error: " << i << FAIL; + tests++; + } + cerr << "tested queries: " << tests << endl; +} + +constexpr int N = 1'000'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + t.start(); + for (ll i = 0; i < N; i++) { + hash += rotateLeft(1'000'000'000'000'000'000ll + i); + } + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/other/josephusK.cpp b/test/other/josephusK.cpp new file mode 100644 index 0000000..e837640 --- /dev/null +++ b/test/other/josephusK.cpp @@ -0,0 +1,43 @@ +#include "../util.h" +#include +#include + +template +ll naive(ll n, ll k) { + vector state(n); + iota(all(state), O); + for (ll i = k-1; state.size() > 1; i = (i + k - 1) % sz(state)) { + state.erase(state.begin() + i); + } + return state[0]; +} + +void stress_test() { + ll tests = 0; + for (ll i = 1; i < 500; i++) { + for (ll j = 1; j <= i; j++) { + auto got = josephus(i, j); + auto expected = naive<0>(i, j); + if (got != expected) cerr << "error: " << i << FAIL; + tests++; + } + } + cerr << "tested queries: " << tests << endl; +} + +constexpr int N = 10'000'000; +void performance_test() { + timer t; + hash_t hash = 0; + t.start(); + hash += josephus(N, N/2); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/other/knuth.cpp b/test/other/knuth.cpp new file mode 100644 index 0000000..d469ceb --- /dev/null +++ b/test/other/knuth.cpp @@ -0,0 +1,103 @@ +#include "../util.h" +constexpr ll inf = LL::INF; +#include + +vector> gen(int n) { + vector> res(n, vector(n)); + ll mi = 0; + for (ll a = n-1; a >= 0; a--) { + for (ll c = n-1; c >= a; c--) { + for (ll b = a; b <= c; b++) { + for (ll d = c; d < n; d++) { + res[a][c] = min(res[a][c], res[a][d] + res[b][c] - res[b][d]); + } + } + res[a][c] -= Random::integer(0, 1000); + mi = min(mi, res[a][c]); + } + } + for (auto& v : res) for (auto& x : v) x -= mi; + + for (ll a = 0; a < n; a++) { + for (ll b = a; b < n; b++) { + for (ll c = b; c < n; c++) { + for (ll d = c; d < n; d++) { + if (res[a][d] < 0 || res[a][d] + res[b][c] < res[a][c] + res[b][d]) { + cerr << "invalid C array!" 
<< FAIL; + } + } + } + } + } + return res; +} + +vector> genQuick(int n) { + vector> res(n, vector(n)); + for (ll a = n-1; a >= 0; a--) { + for (ll c = n-1; c >= a; c--) { + res[a][c] = (c-a) * (c - a) + Random::integer(0, 2); + } + } + return res; +} + +/*ll naive(int n, int m, const vector>& C) { + vector> state(m+1, vector(n+1, inf)); + state[0][0] = 0; + for (int i = 1; i <= m; i++) { + for (int j = 1; j <= n; j++) { + for (int k = 1; k <= j; k++) { + state[i][j] = min(state[i][j], state[i-1][k-1] + C[k-1][j-1]); + } + } + } + return state[m][n]; +}*/ + +vector naive(int n, const vector>& C) { + vector> state(n+1, vector(n+1, inf)); + state[0][0] = 0; + vector res(n+1, inf); + for (int i = 1; i <= n; i++) { + for (int j = 1; j <= n; j++) { + for (int k = 1; k <= j; k++) { + state[i][j] = min(state[i][j], state[i-1][k-1] + C[k-1][j-1]); + } + } + res[i] = state[i][n]; + } + return res; +} + +void stress_test() { + ll tests = 0; + for (ll i = 0; i < 1000; i++) { + auto n = Random::integer(10, 20); + auto C = gen(n); + auto expected = naive(n, C); + for (ll m = 1; m <= n; m++) { + auto got = calc(n, m, C); + if (got != expected[m]) cerr << "got: " << got << ", expected: " << expected[m] << FAIL; + tests++; + } + } + cerr << "tested random queries: " << tests << endl; +} + +constexpr int N = 5000; +void performance_test() { + timer t; + auto C = genQuick(N); + t.start(); + auto hash = calc(N, N/2, C); + t.stop(); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/other/sos.cpp b/test/other/sos.cpp new file mode 100644 index 0000000..f3a6109 --- /dev/null +++ b/test/other/sos.cpp @@ -0,0 +1,50 @@ +#include "../util.h" + +vector sos(const vector& in) { + #include + return res; +} + +vector naive(const vector& in) { + vector res(sz(in)); + for (ll i = 0; i < sz(in); i++) { + for (ll j = 0; j <= i; j++) { + if ((i | j) == i) { + res[i] += in[j]; + } + } + } + return res; +} + +void stress_test() { + ll tests = 0; + for (ll i = 0; i < 1000; i++) { + int n = Random::integer(1, 100); + auto in = Random::integers(n, -1000, 1000); + auto got = sos(in); + auto expected = naive(in); + if (got != expected) cerr << "error: " << i << FAIL; + tests += n; + } + cerr << "tested random queries: " << tests << endl; +} + +constexpr int N = 10'000'000; +void performance_test() { + timer t; + auto in = Random::integers(N, -1000, 1000); + t.start(); + auto res = sos(in); + t.stop(); + hash_t hash = 0; + for (ll x : res) hash += x; + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} + diff --git a/test/other/split.cpp b/test/other/split.cpp new file mode 100644 index 0000000..e0f5ee1 --- /dev/null +++ b/test/other/split.cpp @@ -0,0 +1,24 @@ +#include "../util.h" +#include + +vector split2(string_view s, string_view delim) { + vector res; + while (!s.empty()) { + auto end = s.find_first_of(delim); + if (end != 0) res.emplace_back(s.substr(0, end)); + if (end == string_view::npos) break; + s.remove_prefix(end + 1); + } + return res; +} + +int main() { + auto in = "+" + Random::string(100, "abcdef+-*") + "-"; + + auto expected = split2(in, "+-*"); + auto got = split(in, "+-*"); + + if (got != expected) cerr << "error" << FAIL; + cerr << "done" << endl; +} + diff --git a/test/string/ahoCorasick.cpp 
b/test/string/ahoCorasick.cpp new file mode 100644 index 0000000..c3361d6 --- /dev/null +++ b/test/string/ahoCorasick.cpp @@ -0,0 +1,76 @@ +#include "../util.h" +#include + +vector naive(string s, vector patterns) { + vector ans(patterns.size()); + for (int k = 0; k < (int)patterns.size(); k++) { + string pattern = patterns[k]; + for (int i = 0; i + pattern.size() <= s.size(); i++) { + if (s.substr(i, pattern.size()) == pattern) ans[k]++; + } + } + return ans; +} + +vector normal(string s, vector patterns) { + AhoCorasick aho; + vector ind(patterns.size()); + for (int i = 0; i < (int)patterns.size(); i++) { + ind[i] = aho.addString(patterns[i]); + } + aho.buildGraph(); + + int v = 0; + for (char c : s) v = aho.go(v, c - OFFSET), aho.dp[v]++; + aho.dfs(); + vector ans(patterns.size()); + for (int i = 0; i < (int)patterns.size(); i++) { + ans[i] = aho.dp[ind[i]]; + } + return ans; +} + +void stress_test() { + ll queries = 0; + for (int i = 0; i < 100; i++) { + int n = Random::integer(1, 100); + string s = Random::string(n, "abc"); + int m = Random::integer(1, 100); + vector patterns(m); + for (string& e : patterns) { + int k = Random::integer(1, 100); + e = Random::string(k, "abc"); + } + + auto got = normal(s, patterns); + auto expected = naive(s, patterns); + if (got != expected) cerr << "Wrong Answer" << FAIL; + queries++; + } + cerr << "Tested random queries: " << queries << endl; +} + +constexpr int N = 1'000'000; +void performance_test() { + timer t; + string s = string(N, 'a') + Random::string(N, "ab"); + vector patterns = {"a"}; + for (int sm = 1; sm < N; sm += patterns.back().size()) { + patterns.emplace_back(patterns.back().size()+1, 'a'); + } + for (int i = 0; i < 100; i++) { + patterns.emplace_back(Random::string(N/100, "ab")); + } + + t.start(); + hash_t hash = normal(s, patterns)[0]; + t.stop(); + + if (t.time > 500) cerr << "Too slow: " << t.time << FAIL; + cerr << "Tested performance: " << t.time << "ms (hash: hash " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/string/deBruijn.cpp b/test/string/deBruijn.cpp new file mode 100644 index 0000000..6b3fea4 --- /dev/null +++ b/test/string/deBruijn.cpp @@ -0,0 +1,43 @@ +#include "../util.h" +#include +#include + +bool isDeBruijn(string s, int n, int k) { + ll expected = 1; + for (ll i = 0; i < n; i++) expected *= k; + if (expected != sz(s)) return false; + s += s; + set seen; + for (ll i = 0; 2*i < sz(s); i++) { + seen.insert(string_view(s).substr(i, n)); + } + return sz(seen) == expected; +} + +void stress_test() { + ll queries = 0; + for (ll i = 0; i < 1000; i++) { + int n = Random::integer(1, 9); + auto [l, r] = Random::pair('b', 'f'); + auto got = deBruijn(n, l, r); + if (!isDeBruijn(got, n, r - l + 1)) cerr << "error" << FAIL; + queries += sz(got); + } + cerr << "tested random queries: " << queries << endl; +} + +constexpr int N = 26; +void performance_test() { + timer t; + t.start(); + auto res = deBruijn(N, '0', '1'); + t.stop(); + hash_t hash = sz(res); + if (t.time > 500) cerr << "too slow: " << t.time << FAIL; + cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl; +} + +int main() { + stress_test(); + performance_test(); +} diff --git a/test/string/duval.cpp b/test/string/duval.cpp new file mode 100644 index 0000000..58b4a44 --- /dev/null +++ b/test/string/duval.cpp @@ -0,0 +1,85 @@ +#include "../util.h" +#pragma GCC diagnostic ignored "-Wreturn-type" +#include + +constexpr int N = 20'000'000; + +bool isLyndon(string_view s) { + string t = 
+	for (ll i = 1; i < sz(s); i++) {
+		if (s >= t.substr(i, sz(s))) return false;
+	}
+	return !s.empty();
+}
+
+void stress_test_duval() {
+	ll queries = 0;
+	for (int i = 0; i < 10'000; i++) {
+		int n = Random::integer(1, 100);
+		auto s = Random::string(n, "abc");
+		vector<pair<ll, ll>> got = duval(s);
+		if (got.empty()) cerr << "error: a" << FAIL;
+		if (got.front().first != 0) cerr << "error: b" << FAIL;
+		if (got.back().second != n) cerr << "error: c" << FAIL;
+		for (int j = 1; j < sz(got); j++) {
+			if (got[j - 1].second != got[j].first) cerr << "error: d" << FAIL;
+		}
+		for (auto [l, r] : got) {
+			if (!isLyndon(string_view(s).substr(l, r-l))) cerr << "error: e" << FAIL;
+		}
+		queries += n;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+void performance_test_duval() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	auto got = duval(s);
+	t.stop();
+	hash_t hash = 0;
+	for (auto [l, r] : got) hash += l + r;
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int naive(string s) {
+	ll n = sz(s);
+	s += s;
+	int res = 0;
+	for (int i = 0; i < n; i++) {
+		if (string_view(s).substr(i, n) <= string_view(s).substr(res, n)) res = i;
+	}
+	return res;
+}
+
+void stress_test_minrotation() {
+	ll queries = 0;
+	for (int i = 0; i < 10'000; i++) {
+		int n = Random::integer(1, 100);
+		auto s = Random::string(n, "abc");
+		int got = minrotation(s);
+		auto expected = naive(s);
+		if (got != expected) cerr << s << ": got: " << got << ", expected: " << expected << FAIL;
+		queries += n;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+void performance_test_minrotation() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	hash_t hash = minrotation(s);
+	t.stop();
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test_duval();
+	performance_test_duval();
+	stress_test_minrotation();
+	performance_test_minrotation();
+}
diff --git a/test/string/kmp.cpp b/test/string/kmp.cpp
new file mode 100644
index 0000000..9c9c924
--- /dev/null
+++ b/test/string/kmp.cpp
@@ -0,0 +1,85 @@
+#include "../util.h"
+#include
+
+vector<ll> naive(string_view s) {
+	vector<ll> res(sz(s) + 1, -1);
+	for (int i = 0; i < sz(s); i++) {
+		for (int j = 0; j <= i; j++)
+			if (s.substr(0, j) == s.substr(i-j+1, j))
+				res[i+1] = j;
+	}
+	return res;
+}
+
+void stress_test_preprocessing() {
+	ll queries = 0;
+	for (int tries = 0; tries < 100'000; tries++) {
+		int n = Random::integer(1, 15);
+		auto s = Random::string(n, "abc");
+		auto got = kmpPreprocessing(s);
+		auto expected = naive(s);
+		if (got != expected) cerr << " error" << FAIL;
+		queries += n;
+	}
+	cerr << " tested random queries: " << queries << endl;
+}
+
+constexpr int N = 10'000'000;
+void performance_test_preprocessing() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	auto res = kmpPreprocessing(s);
+	t.stop();
+	hash_t hash = 0;
+	for (int x : res) hash += x;
+	if (t.time > 500) cerr << " too slow: " << t.time << FAIL;
+	cerr << " tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+vector<ll> naive(string_view s, string_view sub) {
+	vector<ll> res;
+	auto pos = s.find(sub);
+	while (pos != string_view::npos) {
+		res.push_back(pos);
+		pos = s.find(sub, pos + 1);
+	}
+	return res;
+}
+
+void stress_test_kmp() {
+	ll queries = 0;
+	auto a = Random::string(10'000, "abc");
+	for (int tries = 0; tries < 10'000; tries++) {
+		int n = Random::integer(1, 10);
+		auto b = Random::string(n, "abc");
+		auto got = kmpSearch(a, b);
+		auto expected = naive(a, b);
+		if (got != expected) cerr << " error" << FAIL;
+		queries += got.size();
+	}
+	cerr << " tested random queries: " << queries << endl;
+}
+
+void performance_test_kmp() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	auto sub1 = Random::string(N/2, "a");
+	auto sub2 = Random::string(N/2, "ab");
+	hash_t hash = 0;
+	t.start();
+	hash += kmpSearch(s, sub1).size();
+	hash += kmpSearch(s, sub2).size();
+	t.stop();
+	if (t.time > 500) cerr << " too slow: " << t.time << FAIL;
+	cerr << " tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	cerr << "preprocessing:" << endl;
+	stress_test_preprocessing();
+	performance_test_preprocessing();
+	cerr << "kmp:" << endl;
+	stress_test_kmp();
+	performance_test_kmp();
+}
diff --git a/test/string/longestCommonSubsequence.cpp b/test/string/longestCommonSubsequence.cpp
new file mode 100644
index 0000000..6d7a6c5
--- /dev/null
+++ b/test/string/longestCommonSubsequence.cpp
@@ -0,0 +1,55 @@
+#include "../util.h"
+#include
+
+bool isSubstr(string_view s, string_view sub) {
+	int i = 0;
+	for (char c : s) {
+		if (i < sz(sub) && c == sub[i]) i++;
+	}
+	return i >= sz(sub);
+}
+
+string naive(string_view s, string_view t) {
+	string res = "";
+	for (ll i = 1; i < (1ll << sz(s)); i++) {
+		string tmp;
+		for (ll j = 0; j < sz(s); j++) {
+			if (((i >> j) & 1) != 0) tmp.push_back(s[j]);
+		}
+		if (sz(tmp) >= sz(res) && isSubstr(t, tmp)) res = tmp;
+	}
+	return res;
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (ll i = 0; i < 10'000; i++) {
+		int n = Random::integer(1, 12);
+		int m = Random::integer(1, 12);
+		auto s = Random::string(n, "abc");
+		auto t = Random::string(m, "abc");
+		auto got = lcss(s, t);
+		auto expected = naive(s, t);
+		if (got != expected) cerr << s << ", " << t << ", got: " << got << ", expected: " << expected << FAIL;
+		queries += n + m;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 2'000;
+void performance_test() {
+	timer t;
+	auto a = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	auto b = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	auto res = lcss(a, b);
+	t.stop();
+	hash_t hash = sz(res);
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/lyndon.cpp b/test/string/lyndon.cpp
new file mode 100644
index 0000000..ecf2dad
--- /dev/null
+++ b/test/string/lyndon.cpp
@@ -0,0 +1,61 @@
+#include "../util.h"
+#include
+
+bool isLyndon(string_view s) {
+	string t = string(s) + string(s);
+	for (ll i = 1; i < sz(s); i++) {
+		if (s >= t.substr(i, sz(s))) return false;
+	}
+	return !s.empty();
+}
+
+vector<string> naive(ll n, char mi, char ma) {
+	vector<string> res;
+	auto dfs = [&](auto&& self, string pref)->void{
+		if (sz(pref) <= n && isLyndon(pref)) res.push_back(pref);
+		if (sz(pref) >= n) return;
+		for (char c = mi; c <= ma; c++) {
+			self(self, pref + c);
+		}
+	};
+	dfs(dfs, "");
+	return res;
+}
+
+vector<string> fast(ll n, char mi, char ma) {
+	vector<string> res;
+	string tmp(1, mi);
+	do {
+		res.push_back(tmp);
+	} while (next(tmp, n, mi, ma));
+	return res;
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (ll i = 0; i < 10'000; i++) {
+		int n = Random::integer(1, 6);
+		auto [l, r] = Random::pair('a', 'f');
+		auto got = fast(n, l, r);
+		auto expected = naive(n, l, r);
+		if (got != expected) cerr << "error" << FAIL;
+		queries += sz(expected);
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 9;
+void performance_test() {
+	timer t;
+	t.start();
+	auto res = fast(N, 'a', 'f');
+	t.stop();
+	hash_t hash = sz(res);
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/manacher.cpp b/test/string/manacher.cpp
new file mode 100644
index 0000000..503d181
--- /dev/null
+++ b/test/string/manacher.cpp
@@ -0,0 +1,49 @@
+#include "../util.h"
+#include
+
+vector<ll> naive(string_view s) {
+	vector<ll> res(2 * sz(s) + 1);
+	for (int i = 0; i < sz(s); i++) { //odd palindromes
+		int j = 2*i+1;
+		while (i+res[j] < sz(s) && i-res[j] >= 0 && s[i-res[j]] == s[i+res[j]]) res[j]++;
+		res[j]*=2;
+		res[j]--;
+	}
+	for (int i = 0; i <= sz(s); i++) { //even palindromes
+		int j = 2*i;
+		while (i+res[j] < sz(s) && i-res[j]-1 >= 0 && s[i-res[j]-1] == s[i+res[j]]) res[j]++;
+		res[j] *= 2;
+	}
+	return res;
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (int i = 0; i < 10'000; i++) {
+		int n = Random::integer(1, 100);
+		auto s = Random::string(n, "abc");
+		vector<ll> got = manacher(s);
+		vector<ll> expected = naive(s);
+		if (got != expected) cerr << "error" << FAIL;
+		queries += n;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 5'000'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	auto got = manacher(s);
+	t.stop();
+	hash_t hash = 0;
+	for (int x : got) hash += x;
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/rollingHash.cpp b/test/string/rollingHash.cpp
new file mode 100644
index 0000000..0491bc0
--- /dev/null
+++ b/test/string/rollingHash.cpp
@@ -0,0 +1,92 @@
+#include "../util.h"
+#include
+
+string thueMorse(ll n) {
+	string res = "a";
+	while (sz(res) < n) {
+		string tmp = res;
+		for (char& c : tmp) c ^= 1;
+		res += tmp;
+	}
+	return res;
+}
+
+auto getHash(const string& s) {
+	return Hash(s)(0, sz(s));
+}
+
+void testThueMorse() {
+	set<hash_t> got;
+	set<string> expected;
+	string s = thueMorse(1000);
+	Hash h(s);
+	for (int l = 0; l < sz(s); l++) {
+		for (int r = l + 1; r <= sz(s); r++) {
+			got.insert(h(l, r));
+			expected.insert(s.substr(l, r - l));
+		}
+	}
+	if (sz(got) != sz(expected)) cerr << "error: thueMorse" << FAIL;
+	cerr << "thueMorse: ok" << endl;
+}
+
+void testTiny() {
+	if (getHash("aa") == getHash("a")) cerr << "error: tiny" << FAIL;
+	if (getHash("00") == getHash("0")) cerr << "error: tiny" << FAIL;
+	if (getHash("AA") == getHash("A")) cerr << "error: tiny" << FAIL;
+	cerr << "tiny: ok" << endl;
+}
+
+void testSmall() {
+	set<hash_t> got;
+	ll expected = 0;
+	auto dfs = [&](auto&& self, string pref)->void {
+		expected++;
+		got.insert(getHash(pref));
+		if(sz(pref) >= 5) return;
+		for (char c = 'a'; c <= 'z'; c++) {
+			self(self, pref + c);
+		}
+	};
+	dfs(dfs, "");
+	if (sz(got) != expected) cerr << "error: small" << FAIL;
+	cerr << "small: ok" << endl;
+}
+
+void stress_test() {
+	set<hash_t> got;
+	set<string> expected;
+	string s = Random::string(1000, "abc");
+	Hash h(s);
+	for (int l = 0; l < sz(s); l++) {
+		for (int r = l + 1; r <= sz(s); r++) {
+			got.insert(h(l, r));
+			expected.insert(s.substr(l, r - l));
+		}
+	}
+	if (sz(got) != sz(expected)) cerr << "error: stress test" << FAIL;
+	cerr << "stress test: ok" << endl;
+}
+
+constexpr int N = 1'000'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	hash_t hash = 0;
+	t.start();
+	Hash h(s);
+	for (ll i = 0; i < N; i++) {
+		hash += h(i, i + 2*N);
+	}
+	t.stop();
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	testThueMorse();
+	testTiny();
+	testSmall();
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/rollingHashCf.cpp b/test/string/rollingHashCf.cpp
new file mode 100644
index 0000000..79003de
--- /dev/null
+++ b/test/string/rollingHashCf.cpp
@@ -0,0 +1,94 @@
+#include "../util.h"
+#include
+
+constexpr ll RandomQ = 318LL << 53;
+
+string thueMorse(ll n) {
+	string res = "a";
+	while (sz(res) < n) {
+		string tmp = res;
+		for (char& c : tmp) c ^= 1;
+		res += tmp;
+	}
+	return res;
+}
+
+auto getHash(const string& s) {
+	return Hash(s, RandomQ)(0, sz(s));
+}
+
+void testThueMorse() {
+	set<hash_t> got;
+	set<string> expected;
+	string s = thueMorse(1000);
+	Hash h(s, RandomQ);
+	for (int l = 0; l < sz(s); l++) {
+		for (int r = l + 1; r <= sz(s); r++) {
+			got.insert(h(l, r));
+			expected.insert(s.substr(l, r - l));
+		}
+	}
+	if (sz(got) != sz(expected)) cerr << "error: thueMorse" << FAIL;
+	cerr << "thueMorse: ok" << endl;
+}
+
+void testTiny() {
+	if (getHash("aa") == getHash("a")) cerr << "error: tiny" << FAIL;
+	if (getHash("00") == getHash("0")) cerr << "error: tiny" << FAIL;
+	if (getHash("AA") == getHash("A")) cerr << "error: tiny" << FAIL;
+	cerr << "tiny: ok" << endl;
+}
+
+void testSmall() {
+	set<hash_t> got;
+	ll expected = 0;
+	auto dfs = [&](auto&& self, string pref)->void {
+		expected++;
+		got.insert(getHash(pref));
+		if(sz(pref) >= 5) return;
+		for (char c = 'a'; c <= 'z'; c++) {
+			self(self, pref + c);
+		}
+	};
+	dfs(dfs, "");
+	if (sz(got) != expected) cerr << "error: small" << FAIL;
+	cerr << "small: ok" << endl;
+}
+
+void stress_test() {
+	set<hash_t> got;
+	set<string> expected;
+	string s = Random::string(1000, "abc");
+	Hash h(s, RandomQ);
+	for (int l = 0; l < sz(s); l++) {
+		for (int r = l + 1; r <= sz(s); r++) {
+			got.insert(h(l, r));
+			expected.insert(s.substr(l, r - l));
+		}
+	}
+	if (sz(got) != sz(expected)) cerr << "error: stress test" << FAIL;
+	cerr << "stress test: ok" << endl;
+}
+
+constexpr int N = 1'000'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	hash_t hash = 0;
+	t.start();
+	Hash h(s, RandomQ);
+	for (ll i = 0; i < N; i++) {
+		hash += h(i, i + 2*N);
+	}
+	t.stop();
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	testThueMorse();
+	testTiny();
+	testSmall();
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/suffixArray.cpp b/test/string/suffixArray.cpp
new file mode 100644
index 0000000..4945d8e
--- /dev/null
+++ b/test/string/suffixArray.cpp
@@ -0,0 +1,61 @@
+#include "../util.h"
+#include
+
+vector<int> naive(string_view s) {
+	vector<int> SA(sz(s));
+	iota(all(SA), 0);
+	sort(all(SA), [s](int a, int b){
+		return s.substr(a) < s.substr(b);
+	});
+	return SA;
+}
+
+int lcp(string_view s, int x, int y) {
+	int res = 0;
+	while (x + res < sz(s) && y + res < sz(s) && s[x + res] == s[y + res]) res++;
+	return res;
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (int i = 0; i < 100; i++) {
+		int n = Random::integer(1, 100);
+		auto s = Random::string(n, "abc");
+		SuffixArray sa(s);
+		vector<int> got = sa.SA;
+		vector<int> expected = naive(s);
+		vector<int> SA(n);
+		if (got != expected) cerr << "error: SA" << FAIL;
+		got = sa.LCP;
+		swap(SA, expected);
+		for (int x = 0; x < n; x++) {
+			for (int y = 0; y < n; y++) {
+				int gotLCP = sa.lcp(x, y);
+				int expectedLCP = lcp(s, x, y);
+				if (gotLCP != expectedLCP) cerr << "error: lcp" << FAIL;
+			}
+			if (x > 0) expected[x] = lcp(s, SA[x-1], SA[x]);
+		}
+		if (got != expected) cerr << "error: LCP" << FAIL;
+		queries += n;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 200'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	SuffixArray sa(s);
+	t.stop();
+	hash_t hash = 0;
+	for (int i = 0; i < sz(sa.SA); i++) hash += i*sa.SA[i];
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/suffixAutomaton.cpp b/test/string/suffixAutomaton.cpp
new file mode 100644
index 0000000..c2ff511
--- /dev/null
+++ b/test/string/suffixAutomaton.cpp
@@ -0,0 +1,62 @@
+#include "../util.h"
+#include
+
+pair<ll, ll> naive(string_view s, string_view t) {
+	int pos = 0;
+	int len = 0;
+	for (int j = 0; j < sz(t); j++) {
+		for (int i = 0; i < sz(s); i++) {
+			int cur = 0;
+			while (i+cur < sz(s) && j+cur < sz(t) && s[i+cur] == t[j+cur]) cur++;
+			if (cur > len) {
+				pos = j;
+				len = cur;
+			}
+		}
+	}
+	return {pos, len};
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (int i = 0; i < 1000; i++) {
+		int n = Random::integer(1, 100);
+		auto s = Random::string(n, "abc");
+		SuffixAutomaton sa(s);
+		for (int j = 0; j < 1000; j++) {
+			int m = Random::integer(1, 100);
+			auto t = Random::string(m, "abc");
+			auto got = sa.longestCommonSubstring(t);
+			auto expected = naive(s, t);
+			if (got != expected) cerr << "error" << FAIL;
+			queries += m;
+		}
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 500'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyz");
+	t.start();
+	SuffixAutomaton sa(s);
+	t.stop();
+	hash_t hash = 0;
+	for (ll c = 0; c < sz(s);) {
+		int m = Random::integer(1, 1000);
+		s = Random::string(m, "abc");
+		t.start();
+		auto [p, l] = sa.longestCommonSubstring(s);
+		t.stop();
+		hash += l + p;
+		c += m;
+	}
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/suffixTree.cpp b/test/string/suffixTree.cpp
new file mode 100644
index 0000000..c0d79e4
--- /dev/null
+++ b/test/string/suffixTree.cpp
@@ -0,0 +1,50 @@
+#include "../util.h"
+#include
+
+vector<string> naive(string_view s) {
+	vector<string> res(sz(s));
+	for (ll i = 0; i < sz(s); i++) {
+		res[i] = s.substr(i);
+	}
+	return res;
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (int i = 0; i < 10'000; i++) {
+		int n = Random::integer(1, 15);
+		auto s = Random::string(n, "abc") + "#";
+		SuffixTree st(s);
+		vector<string> got(n + 1);
+		auto dfs = [&](auto&& self, string pref, ll node) -> void {
+			auto& [l, r, _, next] = st.tree[node];
+			if (l >= 0) pref += s.substr(l, r - l);
+			if (pref.back() == '#') got[n + 1 - sz(pref)] = pref;
+			for (auto [__, j] : next) {
+				self(self, pref, j);
+			}
+		};
+		dfs(dfs, "", 0);
+		auto expected = naive(s);
+		if (got != expected) cerr << "error" << FAIL;
+		queries += n;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 200'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	SuffixTree st(s);
+	t.stop();
+	hash_t hash = sz(st.tree);
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/trie.cpp b/test/string/trie.cpp
new file mode 100644
index 0000000..45d89cf
--- /dev/null
+++ b/test/string/trie.cpp
@@ -0,0 +1,58 @@
+#include "../util.h"
+#include
+
+void stress_test() {
+	multiset<vector<ll>> naive;
+	ll queries = 0;
+	ll deleted = 0;
+	for (int tries = 0; tries < 100'000; tries++) {
+		{
+			int n = Random::integer(1, 20);
+			auto s = Random::integers(n, 0, 2);
+			insert(s);
+			naive.insert(s);
+		}
+		{
+			int n = Random::integer(1, 20);
+			auto s = Random::integers(n, 0, 2);
+			bool got = erase(s);
+			auto it = naive.find(s);
+			bool expected = it != naive.end();
+			if (expected) naive.erase(it);
+			if (got != expected) cerr << "error" << FAIL;
+			queries++;
+			if (got) deleted++;
+		}
+	}
+	cerr << "tested random queries: " << queries << " (" << deleted << ")" << endl;
+}
+
+constexpr int N = 10'000;
+void performance_test() {
+	timer t;
+	trie = {node()};
+	hash_t hash = 0;
+	for (int tries = 0; tries < N; tries++) {
+		{
+			int n = Random::integer(1, 2000);
+			auto s = Random::integers(n, 0, 2);
+			t.start();
+			insert(s);
+			t.stop();
+		}
+		{
+			int n = Random::integer(1, 2000);
+			auto s = Random::integers(n, 0, 2);
+			t.start();
+			hash += erase(s);
+			t.stop();
+		}
+	}
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/string/z.cpp b/test/string/z.cpp
new file mode 100644
index 0000000..f890a3e
--- /dev/null
+++ b/test/string/z.cpp
@@ -0,0 +1,41 @@
+#include "../util.h"
+#include
+
+vector<ll> naive(const string& s) {
+	vector<ll> res(sz(s));
+	for (int i = 1; i < sz(s); i++) {
+		while (i + res[i] < sz(s) && s[res[i]] == s[i + res[i]]) res[i]++;
+	}
+	return res;
+}
+
+void stress_test() {
+	ll queries = 0;
+	for (int tries = 0; tries < 100'000; tries++) {
+		int n = Random::integer(1, 15);
+		auto s = Random::string(n, "abc");
+		auto got = Z(s);
+		auto expected = naive(s);
+		if (got != expected) cerr << "error" << FAIL;
+		queries += n;
+	}
+	cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 10'000'000;
+void performance_test() {
+	timer t;
+	auto s = Random::string(N, "a") + Random::string(N, "ab") + Random::string(N, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789$#");
+	t.start();
+	auto res = Z(s);
+	t.stop();
+	hash_t hash = 0;
+	for (int x : res) hash += x;
+	if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
+	cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+	stress_test();
+	performance_test();
+}
diff --git a/test/template/template.cpp b/test/template/template.cpp
new file mode 100644
index 0000000..db9aa00
--- /dev/null
+++ b/test/template/template.cpp
@@ -0,0 +1 @@
+#include