-rw-r--r--  .github/workflows/list_missing.yml | 21
-rw-r--r--  .github/workflows/test_all.yml | 14
-rw-r--r--  .github/workflows/test_datastructures.yml | 22
-rw-r--r--  .github/workflows/test_geometry.yml | 22
-rw-r--r--  .github/workflows/test_graph.yml | 22
-rw-r--r--  .github/workflows/test_math.yml | 22
-rw-r--r--  .github/workflows/test_other.yml | 22
-rw-r--r--  .github/workflows/test_pdf.yml | 39
-rw-r--r--  .github/workflows/test_string.yml | 22
-rw-r--r--  .github/workflows/test_template.yml | 22
-rw-r--r--  .gitignore | 8
-rw-r--r--  Makefile | 30
-rw-r--r--  README.md | 38
-rw-r--r--  content/datastructures/datastructures.tex | 57
-rw-r--r--  content/datastructures/dynamicConvexHull.cpp | 18
-rw-r--r--  content/datastructures/fenwickTree.cpp | 4
-rw-r--r--  content/datastructures/fenwickTree2.cpp | 16
-rw-r--r--  content/datastructures/lazyPropagation.cpp | 37
-rw-r--r--  content/datastructures/lichao.cpp | 17
-rw-r--r--  content/datastructures/monotonicConvexHull.cpp | 42
-rw-r--r--  content/datastructures/pbds.cpp | 16
-rw-r--r--  content/datastructures/persistent.cpp | 36
-rw-r--r--  content/datastructures/persistentArray.cpp | 48
-rw-r--r--  content/datastructures/segmentTree.cpp | 9
-rw-r--r--  content/datastructures/sparseTable.cpp | 10
-rw-r--r--  content/datastructures/sparseTableDisjoint.cpp | 10
-rw-r--r--  content/datastructures/stlHashMap.cpp | 17
-rw-r--r--  content/datastructures/stlPriorityQueue.cpp | 8
-rw-r--r--  content/datastructures/stlTree.cpp | 13
-rw-r--r--  content/datastructures/treap.cpp | 2
-rw-r--r--  content/datastructures/waveletTree.cpp | 26
-rw-r--r--  content/geometry/antipodalPoints.cpp | 8
-rw-r--r--  content/geometry/circle.cpp | 2
-rw-r--r--  content/geometry/closestPair.cpp | 15
-rw-r--r--  content/geometry/convexHull.cpp | 22
-rw-r--r--  content/geometry/delaunay.cpp | 23
-rw-r--r--  content/geometry/formulas.cpp | 13
-rw-r--r--  content/geometry/formulas3d.cpp | 16
-rw-r--r--  content/geometry/geometry.tex | 17
-rw-r--r--  content/geometry/hpi.cpp | 4
-rw-r--r--  content/geometry/linesAndSegments.cpp | 6
-rw-r--r--  content/geometry/polygon.cpp | 30
-rw-r--r--  content/geometry/segmentIntersection.cpp | 4
-rw-r--r--  content/geometry/sortAround.cpp | 22
-rw-r--r--  content/geometry/triangle.cpp | 4
-rw-r--r--  content/graph/2sat.cpp | 16
-rw-r--r--  content/graph/LCA_sparse.cpp | 14
-rw-r--r--  content/graph/TSP.cpp | 4
-rw-r--r--  content/graph/articulationPoints.cpp | 10
-rw-r--r--  content/graph/binary_lifting.cpp | 28
-rw-r--r--  content/graph/bitonicTSP.cpp | 14
-rw-r--r--  content/graph/bitonicTSPsimple.cpp | 14
-rw-r--r--  content/graph/blossom.cpp | 14
-rw-r--r--  content/graph/bronKerbosch.cpp | 4
-rw-r--r--  content/graph/centroid.cpp | 2
-rw-r--r--  content/graph/cycleCounting.cpp | 18
-rw-r--r--  content/graph/dijkstra.cpp | 31
-rw-r--r--  content/graph/dinic.cpp | 10
-rw-r--r--  content/graph/dinicScaling.cpp | 10
-rw-r--r--  content/graph/euler.cpp | 4
-rw-r--r--  content/graph/floydWarshall.cpp | 12
-rw-r--r--  content/graph/graph.tex | 51
-rw-r--r--  content/graph/havelHakimi.cpp | 6
-rw-r--r--  content/graph/hld.cpp | 2
-rw-r--r--  content/graph/hopcroftKarp.cpp | 8
-rw-r--r--  content/graph/kruskal.cpp | 2
-rw-r--r--  content/graph/kuhn.cpp (renamed from content/graph/maxCarBiMatch.cpp) | 2
-rw-r--r--  content/graph/matching.cpp | 10
-rw-r--r--  content/graph/maxWeightBipartiteMatching.cpp | 4
-rw-r--r--  content/graph/minCostMaxFlow.cpp | 12
-rw-r--r--  content/graph/pushRelabel.cpp | 10
-rw-r--r--  content/graph/reroot.cpp | 6
-rw-r--r--  content/graph/scc.cpp | 8
-rw-r--r--  content/graph/stoerWagner.cpp | 16
-rw-r--r--  content/graph/treeIsomorphism.cpp | 4
-rw-r--r--  content/graph/virtualTree.cpp | 10
-rw-r--r--  content/latexHeaders/code.sty | 29
-rw-r--r--  content/latexHeaders/commands.sty | 7
-rw-r--r--  content/latexmk.opt | 2
-rw-r--r--  content/latexmkrc | 13
-rw-r--r--  content/math/berlekampMassey.cpp | 2
-rw-r--r--  content/math/bigint.cpp | 67
-rw-r--r--  content/math/binomial0.cpp | 2
-rw-r--r--  content/math/binomial1.cpp | 2
-rw-r--r--  content/math/discreteLogarithm.cpp | 4
-rw-r--r--  content/math/divisors.cpp | 2
-rw-r--r--  content/math/gauss.cpp | 4
-rw-r--r--  content/math/gcd-lcm.cpp | 4
-rw-r--r--  content/math/inversions.cpp | 2
-rw-r--r--  content/math/inversionsMerge.cpp | 14
-rw-r--r--  content/math/lgsFp.cpp | 2
-rw-r--r--  content/math/linearRecurrence.cpp | 8
-rw-r--r--  content/math/linearRecurrenceOld.cpp | 8
-rw-r--r--  content/math/linearSieve.cpp | 12
-rw-r--r--  content/math/longestIncreasingSubsequence.cpp | 4
-rw-r--r--  content/math/math.tex | 32
-rw-r--r--  content/math/matrixPower.cpp | 8
-rw-r--r--  content/math/permIndex.cpp | 4
-rw-r--r--  content/math/piLegendre.cpp | 46
-rw-r--r--  content/math/polynomial.cpp | 6
-rw-r--r--  content/math/primeSieve.cpp | 2
-rw-r--r--  content/math/recover.cpp | 2
-rw-r--r--  content/math/rho.cpp | 4
-rw-r--r--  content/math/shortModInv.cpp | 2
-rw-r--r--  content/math/simpson.cpp | 2
-rw-r--r--  content/math/sqrtModCipolla.cpp | 2
-rw-r--r--  content/math/tables/composite.tex | 26
-rw-r--r--  content/math/tables/prime-composite.tex | 31
-rw-r--r--  content/math/transforms/andTransform.cpp | 4
-rw-r--r--  content/math/transforms/bitwiseTransforms.cpp | 6
-rw-r--r--  content/math/transforms/fft.cpp | 2
-rw-r--r--  content/math/transforms/fftMul.cpp | 6
-rw-r--r--  content/math/transforms/multiplyBitwise.cpp | 2
-rw-r--r--  content/math/transforms/multiplyFFT.cpp | 4
-rw-r--r--  content/math/transforms/multiplyNTT.cpp | 2
-rw-r--r--  content/math/transforms/ntt.cpp | 2
-rw-r--r--  content/math/transforms/orTransform.cpp | 4
-rw-r--r--  content/math/transforms/seriesOperations.cpp | 8
-rw-r--r--  content/math/transforms/xorTransform.cpp | 2
-rw-r--r--  content/other/fastIO.cpp | 2
-rw-r--r--  content/other/fastSubsetSum.cpp | 10
-rw-r--r--  content/other/josephus2.cpp | 6
-rw-r--r--  content/other/other.tex | 35
-rw-r--r--  content/other/pbs.cpp | 2
-rw-r--r--  content/other/sos.cpp | 6
-rw-r--r--  content/other/timed.cpp | 2
-rw-r--r--  content/string/ahoCorasick.cpp | 11
-rw-r--r--  content/string/deBruijn.cpp | 2
-rw-r--r--  content/string/duval.cpp | 6
-rw-r--r--  content/string/kmp.cpp | 8
-rw-r--r--  content/string/longestCommonSubsequence.cpp | 8
-rw-r--r--  content/string/lyndon.cpp | 2
-rw-r--r--  content/string/manacher.cpp | 6
-rw-r--r--  content/string/rollingHash.cpp | 2
-rw-r--r--  content/string/rollingHashCf.cpp | 2
-rw-r--r--  content/string/string.tex | 14
-rw-r--r--  content/string/suffixArray.cpp | 19
-rw-r--r--  content/string/suffixAutomaton.cpp | 12
-rw-r--r--  content/string/suffixTree.cpp | 10
-rw-r--r--  content/string/trie.cpp | 4
-rw-r--r--  content/string/z.cpp | 2
-rw-r--r--  content/tcr.tex | 13
-rw-r--r--  content/template/template.cpp | 12
-rw-r--r--  tcr.pdf | bin 703380 -> 0 bytes
-rw-r--r--  test/GNUmakefile | 36
-rw-r--r--  test/datastructures/LCT.cpp | 8
-rw-r--r--  test/datastructures/dynamicConvexHull.lichao.cpp | 2
-rw-r--r--  test/datastructures/fenwickTree.cpp | 4
-rw-r--r--  test/datastructures/fenwickTree2.cpp | 4
-rw-r--r--  test/datastructures/lazyPropagation.cpp | 59
-rw-r--r--  test/datastructures/lichao.cpp | 4
-rw-r--r--  test/datastructures/monotonicConvexHull.cpp | 28
-rw-r--r--  test/datastructures/pbds.cpp | 11
-rw-r--r--  test/datastructures/persistentArray.cpp | 10
-rw-r--r--  test/datastructures/segmentTree.cpp | 6
-rw-r--r--  test/datastructures/sparseTable.cpp | 10
-rw-r--r--  test/datastructures/sparseTableDisjoint.cpp | 6
-rw-r--r--  test/datastructures/stlHashMap.cpp | 4
-rw-r--r--  test/datastructures/stlPriorityQueue.cpp | 6
-rw-r--r--  test/datastructures/stlPriorityQueue.cpp.awk | 37
-rw-r--r--  test/datastructures/stlRope.cpp | 4
-rw-r--r--  test/datastructures/stlRope.cpp.awk | 2
-rw-r--r--  test/datastructures/stlTree.cpp | 2
-rw-r--r--  test/datastructures/treap.cpp | 6
-rw-r--r--  test/datastructures/waveletTree.cpp | 4
-rwxr-xr-x  test/fuzz.sh | 14
-rw-r--r--  test/geometry.h | 4
-rw-r--r--  test/geometry/antipodalPoints.cpp | 8
-rw-r--r--  test/geometry/circle.cpp | 12
-rw-r--r--  test/geometry/closestPair.cpp | 2
-rw-r--r--  test/geometry/closestPair.double.cpp | 2
-rw-r--r--  test/geometry/convexHull.cpp | 4
-rw-r--r--  test/geometry/delaunay.cpp | 37
-rw-r--r--  test/geometry/formulas.cpp | 2
-rw-r--r--  test/geometry/hpi.cpp | 48
-rw-r--r--  test/geometry/polygon.cpp | 22
-rw-r--r--  test/geometry/segmentIntersection.cpp | 2
-rw-r--r--  test/geometry/sortAround.cpp | 6
-rw-r--r--  test/graph/TSP.cpp | 8
-rw-r--r--  test/graph/articulationPoints.bcc.cpp | 18
-rw-r--r--  test/graph/articulationPoints.bridges.cpp | 12
-rw-r--r--  test/graph/articulationPoints.cpp | 10
-rw-r--r--  test/graph/binary_lifting.cpp | 60
-rw-r--r--  test/graph/bronKerbosch.cpp | 10
-rw-r--r--  test/graph/centroid.cpp | 12
-rw-r--r--  test/graph/connect.cpp | 10
-rw-r--r--  test/graph/cycleCounting.cpp | 6
-rw-r--r--  test/graph/dijkstra.cpp | 12
-rw-r--r--  test/graph/euler.cpp | 10
-rw-r--r--  test/graph/floydWarshall.cpp | 4
-rw-r--r--  test/graph/havelHakimi.cpp | 12
-rw-r--r--  test/graph/hopcroftKarp.cpp | 2
-rw-r--r--  test/graph/kuhn.cpp (renamed from test/graph/maxCarBiMatch.cpp) | 2
-rw-r--r--  test/graph/reroot.cpp | 2
-rw-r--r--  test/graph/stoerWagner.cpp | 4
-rw-r--r--  test/graph/treeIsomorphism.cpp | 8
-rw-r--r--  test/graph/virtualTree.cpp | 8
-rw-r--r--  test/math/berlekampMassey.cpp | 8
-rw-r--r--  test/math/bigint.cpp | 4
-rw-r--r--  test/math/binomial0.cpp | 2
-rw-r--r--  test/math/binomial1.cpp | 2
-rw-r--r--  test/math/binomial2.cpp | 2
-rw-r--r--  test/math/binomial3.cpp | 2
-rw-r--r--  test/math/cycleDetection.cpp | 1
-rw-r--r--  test/math/gauss.cpp | 18
-rw-r--r--  test/math/inversions.cpp | 3
-rw-r--r--  test/math/inversionsMerge.cpp | 4
-rw-r--r--  test/math/kthperm.cpp | 5
-rw-r--r--  test/math/kthperm_permIndex.cpp | 1
-rw-r--r--  test/math/lgsFp.cpp | 12
-rw-r--r--  test/math/linearRecurrence.cpp | 7
-rw-r--r--  test/math/linearRecurrenceNTT.cpp | 6
-rw-r--r--  test/math/linearRecurrenceOld.cpp | 6
-rw-r--r--  test/math/linearSieve.cpp | 2
-rw-r--r--  test/math/longestIncreasingSubsequence.cpp | 13
-rw-r--r--  test/math/matrixPower.cpp | 20
-rw-r--r--  test/math/millerRabin.base32.cpp | 2
-rw-r--r--  test/math/millerRabin.cpp | 2
-rw-r--r--  test/math/permIndex.cpp | 7
-rw-r--r--  test/math/polynomial.cpp | 16
-rw-r--r--  test/math/primeSieve.cpp | 4
-rw-r--r--  test/math/primitiveRoot.cpp | 2
-rw-r--r--  test/math/shortModInv.cpp | 2
-rw-r--r--  test/math/transforms/fft.cpp | 8
-rw-r--r--  test/math/transforms/fftMul.cpp | 10
-rw-r--r--  test/math/transforms/multiplyBitwise.cpp | 6
-rw-r--r--  test/math/transforms/multiplyFFT.cpp | 6
-rw-r--r--  test/math/transforms/multiplyNTT.cpp | 6
-rw-r--r--  test/math/transforms/seriesOperations.cpp | 8
-rw-r--r--  test/missing.ignore | 7
-rw-r--r--  test/other/bitOps.cpp | 6
-rw-r--r--  test/other/josephus2.cpp | 6
-rw-r--r--  test/other/josephusK.cpp | 4
-rw-r--r--  test/other/pbs.cpp | 6
-rw-r--r--  test/other/sos.cpp | 50
-rw-r--r--  test/string/deBruijn.cpp | 10
-rw-r--r--  test/string/duval.cpp | 12
-rw-r--r--  test/string/kmp.cpp | 4
-rw-r--r--  test/string/longestCommonSubsequence.cpp | 12
-rw-r--r--  test/string/lyndon.cpp | 12
-rw-r--r--  test/string/manacher.cpp | 10
-rw-r--r--  test/string/rollingHash.cpp | 20
-rw-r--r--  test/string/rollingHashCf.cpp | 20
-rw-r--r--  test/string/suffixArray.cpp | 10
-rw-r--r--  test/string/suffixAutomaton.cpp | 8
-rw-r--r--  test/string/suffixTree.cpp | 8
-rw-r--r--  test/string/z.cpp | 6
-rwxr-xr-x  test/test.sh | 114
-rw-r--r--  test/util.h | 68
249 files changed, 1403 insertions, 1638 deletions
diff --git a/.github/workflows/list_missing.yml b/.github/workflows/list_missing.yml
deleted file mode 100644
index 0ed7e01..0000000
--- a/.github/workflows/list_missing.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-on: [push, pull_request]
-
-jobs:
- missing:
- name: List missing
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh --missing
- - run: ./test/test.sh --coverage >> $GITHUB_ENV
- - uses: schneegans/dynamic-badges-action@v1.7.0
- with:
- auth: ${{ secrets.GIST_COVERAGE_SECRET }}
- gistID: 73fb3c58350c58b623f221fc237def62
- filename: tcr_coverage.json
- label: coverage
- message: ${{ env.COVERED }}/${{ env.TOTAL }}
- namedLogo: GitHub
- valColorRange: ${{ env.TOTAL }}
- minColorRange: ${{ env.REQUIRED }}
- maxColorRange: ${{ env.TOTAL }}
diff --git a/.github/workflows/test_all.yml b/.github/workflows/test_all.yml
deleted file mode 100644
index bb2489b..0000000
--- a/.github/workflows/test_all.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-on:
- workflow_dispatch:
-
-jobs:
- all:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test all (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 20
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh
diff --git a/.github/workflows/test_datastructures.yml b/.github/workflows/test_datastructures.yml
deleted file mode 100644
index dffcf0a..0000000
--- a/.github/workflows/test_datastructures.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/datastructures/**'
- - 'test/datastructures/**'
- pull_request:
- paths:
- - 'content/datastructures/**'
- - 'test/datastructures/**'
- workflow_dispatch:
-
-jobs:
- datastructures:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test datastructures (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh datastructures
diff --git a/.github/workflows/test_geometry.yml b/.github/workflows/test_geometry.yml
deleted file mode 100644
index fc45e5c..0000000
--- a/.github/workflows/test_geometry.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/geometry/**'
- - 'test/geometry/**'
- pull_request:
- paths:
- - 'content/geometry/**'
- - 'test/geometry/**'
- workflow_dispatch:
-
-jobs:
- geometry:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test geometry (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh geometry
diff --git a/.github/workflows/test_graph.yml b/.github/workflows/test_graph.yml
deleted file mode 100644
index 505707c..0000000
--- a/.github/workflows/test_graph.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/graph/**'
- - 'test/graph/**'
- pull_request:
- paths:
- - 'content/graph/**'
- - 'test/graph/**'
- workflow_dispatch:
-
-jobs:
- graph:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test graph (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh graph
diff --git a/.github/workflows/test_math.yml b/.github/workflows/test_math.yml
deleted file mode 100644
index ef759c0..0000000
--- a/.github/workflows/test_math.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/math/**'
- - 'test/math/**'
- pull_request:
- paths:
- - 'content/math/**'
- - 'test/math/**'
- workflow_dispatch:
-
-jobs:
- math:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test math (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh math
diff --git a/.github/workflows/test_other.yml b/.github/workflows/test_other.yml
deleted file mode 100644
index 14c0550..0000000
--- a/.github/workflows/test_other.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/other/**'
- - 'test/other/**'
- pull_request:
- paths:
- - 'content/other/**'
- - 'test/other/**'
- workflow_dispatch:
-
-jobs:
- other:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test other (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh other
diff --git a/.github/workflows/test_pdf.yml b/.github/workflows/test_pdf.yml
deleted file mode 100644
index ab273f7..0000000
--- a/.github/workflows/test_pdf.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-on:
- push:
- paths:
- - 'content/**'
- - 'Makefile'
- pull_request:
- paths:
- - 'content/**'
- - 'Makefile'
- workflow_dispatch:
-
-jobs:
- pdf_22-04:
- name: Test pdf (ubuntu-22.04)
- runs-on: ubuntu-22.04
- timeout-minutes: 5
- steps:
- - uses: actions/checkout@v4
- - run: |
- sudo apt-get update
- sudo apt-get install latexmk texlive-latex-base texlive-latex-recommended texlive-latex-extra texlive-lang-german texlive-fonts-extra
- - run: make
-
- pdf_latest:
- name: Test pdf (ubuntu-latest)
- runs-on: ubuntu-22.04
- timeout-minutes: 5
- steps:
- - uses: actions/checkout@v4
- - run: |
- sudo apt-get update
- sudo apt-get install latexmk texlive-latex-base texlive-latex-recommended texlive-latex-extra texlive-lang-german texlive-fonts-extra
- - run: make
- - uses: exuanbo/actions-deploy-gist@v1
- with:
- token: ${{ secrets.GIST_COVERAGE_SECRET }}
- gist_id: 73fb3c58350c58b623f221fc237def62
- file_path: tcr.pdf
- file_type: binary
diff --git a/.github/workflows/test_string.yml b/.github/workflows/test_string.yml
deleted file mode 100644
index 0d79040..0000000
--- a/.github/workflows/test_string.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/string/**'
- - 'test/string/**'
- pull_request:
- paths:
- - 'content/string/**'
- - 'test/string/**'
- workflow_dispatch:
-
-jobs:
- string:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test string (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh string
diff --git a/.github/workflows/test_template.yml b/.github/workflows/test_template.yml
deleted file mode 100644
index 01f57bb..0000000
--- a/.github/workflows/test_template.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-on:
- push:
- paths:
- - 'content/template/**'
- - 'test/template/**'
- pull_request:
- paths:
- - 'content/template/**'
- - 'test/template/**'
- workflow_dispatch:
-
-jobs:
- template:
- strategy:
- matrix:
- os: [ubuntu-latest, ubuntu-22.04]
- name: Test template (${{ matrix.os }})
- runs-on: ${{ matrix.os }}
- timeout-minutes: 10
- steps:
- - uses: actions/checkout@v4
- - run: ./test/test.sh template
diff --git a/.gitignore b/.gitignore
index 4c03241..01e9771 100644
--- a/.gitignore
+++ b/.gitignore
@@ -221,9 +221,9 @@ TSWLatexianTemp*
*~
-# ignore build dir
-build/*
-# dont ignore build tcr
-!tcr.pdf
+# files produced by the testing system
+*.test
+*.ok
+*.d
# ignore build test awk files
test/awk/*
diff --git a/Makefile b/Makefile
index b3538cf..02a5361 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,26 @@
-all:
- cd content; latexmk -pdf tcr -output-directory=.. -aux-directory=../build/ -usepretex="\newcommand{\gitorigin}{https://github.com/mzuenni/ContestReference/tree/$(shell git branch --show-current)/content/}"
-clean:
- rm -r build/*
+LATEXMK = latexmk -interaction=nonstopmode
+
+tcr.pdf: FORCE
+ cd content && $(LATEXMK)
+
+tcr-opt.pdf: FORCE
+ cd content && $(LATEXMK) -r latexmk.opt
+
+pdf: tcr.pdf tcr-opt.pdf
+
+all: pdf test
+
+test:
+ +gmake -C test
+
+clean: cleanpdf cleantest
+
+cleanpdf:
+ cd content && $(LATEXMK) -C
+ cd content && $(LATEXMK) -r latexmk.opt -C
+
+cleantest:
+ +-gmake -C clean
+
+FORCE:
+.PHONY: all pdf test clean cleanpdf cleantest FORCE
diff --git a/README.md b/README.md
deleted file mode 100644
index 7edf67b..0000000
--- a/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# KIT Team Contest Reference
-> [!TIP]
-> You can use this [pdf.js link](https://mozilla.github.io/pdf.js/web/viewer.html?file=https://raw.githubusercontent.com/mzuenni/ContestReference/new-master/tcr.pdf) to watch the commited pdf with working links,
-> or [this one](https://mozilla.github.io/pdf.js/web/viewer.html?file=https://gist.githubusercontent.com/mzuenni/73fb3c58350c58b623f221fc237def62/raw/tcr.pdf) to look at the current build.
-
-The KIT teams have used this document for ICPC-style contests since roughly 2019.
-It consists of 25 pages of copy-pasteable C++ code and one extra page with a checklist for the practice session.
-
-## Testing
-To make this document as useful as possible we try to (automatically) stress test all code in this repository.
-Nonetheless, not all code is tested and tests might not catch every bug.
-If you find a bug please [open an issue](https://github.com/mzuenni/ContestReference/issues/new).
-If you think code can be changed, improved or replaced also feel free to open an issue or make open a pull request.
-
-[![test c++](https://github.com/mzuenni/ContestReference/actions/workflows/test_all.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_all.yml/)
-[![test pdf](https://github.com/mzuenni/ContestReference/actions/workflows/test_pdf.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_pdf.yml/)
-[![coverage](https://img.shields.io/endpoint?url=https://gist.githubusercontent.com/mzuenni/73fb3c58350c58b623f221fc237def62/raw/tcr_coverage.json)](https://github.com/mzuenni/ContestReference/actions/workflows/list_missing.yml)
-## Other Resources
-The code in this repo has been accumulated over many years and the origin of the code is unfortunately unknown for most of the snippets.
-Even though much code is written from scratch, plenty of code has been copied from others and just adjusted to our coding style.
-Here is an (incomplete) list of resources that we use (besides those from previous versions):
- - https://github.com/indy256/codelibrary
- - https://github.com/spaghetti-source/algorithm
- - https://github.com/kth-competitive-programming/kactl
-
-## Previous Versions
-- https://github.com/mzuenni/ContestReference/tree/master (2018-2019)
-- https://github.com/pjungeblut/ChaosKITs (2016-2018)
-- https://github.com/niklasb/contest-algos (2012-2016)
-
-## Testing Status
- - [![test datastructures](https://github.com/mzuenni/ContestReference/actions/workflows/test_datastructures.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_datastructures.yml/)
- - [![test geometry](https://github.com/mzuenni/ContestReference/actions/workflows/test_geometry.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_geometry.yml/)
- - [![test graph](https://github.com/mzuenni/ContestReference/actions/workflows/test_graph.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_graph.yml/)
- - [![test math](https://github.com/mzuenni/ContestReference/actions/workflows/test_math.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_math.yml/)
- - [![test other](https://github.com/mzuenni/ContestReference/actions/workflows/test_other.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_other.yml/)
- - [![test string](https://github.com/mzuenni/ContestReference/actions/workflows/test_string.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_string.yml/)
- - [![test template](https://github.com/mzuenni/ContestReference/actions/workflows/test_template.yml/badge.svg)](https://github.com/mzuenni/ContestReference/actions/workflows/test_template.yml/)
diff --git a/content/datastructures/datastructures.tex b/content/datastructures/datastructures.tex
index c9f3d2a..c4bd312 100644
--- a/content/datastructures/datastructures.tex
+++ b/content/datastructures/datastructures.tex
@@ -10,7 +10,7 @@
\subsubsection{Lazy Propagation}
Assignment modifications, sum queries \\
- \method{lower\_bound}{erster Index in $[l, r)$ $\geq$ x (erfordert max-combine)}{\log(n)}
+ \method{binary\_search}{kleinstes $x$ in $[l, r]$ mit $f(\text{query}(l, x))$ (monoton in $x$)}{\log(n)}
\sourcecode{datastructures/lazyPropagation.cpp}
\end{algorithm}
@@ -20,6 +20,8 @@
\method{kth}{sort $[l, r)[k]$}{\log(\Sigma)}
\method{countSmaller}{Anzahl elemente in $[l, r)$ kleiner als $k$}{\log(\Sigma)}
\end{methods}
+ $\Sigma$ ist die Gr\"o\ss e des Eingabebereichs, d.h.
+ $\mathit{max} - \mathit{min}$.
\sourcecode{datastructures/waveletTree.cpp}
\end{algorithm}
\columnbreak
@@ -27,15 +29,15 @@
\begin{algorithm}{Fenwick Tree}
\begin{methods}
\method{init}{baut den Baum auf}{n\*\log(n)}
- \method{prefix\_sum}{summe von $[0, i]$}{\log(n)}
+ \method{prefix\_sum}{summe von $[0, i)$}{\log(n)}
\method{update}{addiert ein Delta zu einem Element}{\log(n)}
\end{methods}
\sourcecode{datastructures/fenwickTree.cpp}
\begin{methods}
\method{init}{baut den Baum auf}{n\*\log(n)}
- \method{prefix\_sum}{summe von [$0, i]$}{\log(n)}
- \method{update}{addiert ein Delta zu allen Elementen $[l, r)$. $l\leq r$!}{\log(n)}
+ \method{prefix\_sum}{summe von $[0, i)$}{\log(n)}
+ \method{update}{addiert ein Delta zu allen Elementen $[l, r)$}{\log(n)}
\end{methods}
\sourcecode{datastructures/fenwickTree2.cpp}
\end{algorithm}
@@ -56,7 +58,7 @@
\begin{algorithm}{Range Minimum Query}
\begin{methods}
\method{init}{baut Struktur auf}{n\*\log(n)}
- \method{queryIdempotent}{Index des Minimums in $[l, r)$. $l<r$!}{1}
+ \method{query}{Index des Minimums in $[l, r)$}{1}
\end{methods}
\begin{itemize}
\item \code{better}-Funktion muss idempotent sein!
@@ -64,6 +66,14 @@
\sourcecode{datastructures/sparseTable.cpp}
\end{algorithm}
+\begin{algorithm}[optional]{Range Aggregate Query}
+ \begin{methods}
+ \method{init}{baut Struktur auf}{n\*\log(n)}
+ \method{query}{Aggregat über $[l,r)$}{1}
+ \end{methods}
+ \sourcecode{datastructures/sparseTableDisjoint.cpp}
+\end{algorithm}
+
\begin{algorithm}{STL-Bitset}
\sourcecode{datastructures/bitset.cpp}
\end{algorithm}
@@ -80,30 +90,43 @@
\end{methods}
\sourcecode{datastructures/LCT.cpp}
\end{algorithm}
-\clearpage
+\columnbreak
-\begin{algorithm}{Lichao}
- \sourcecode{datastructures/lichao.cpp}
+\begin{algorithm}{Lower Envelope (Convex Hull Optimization)}
+ Um aus einem Lower Envelope einen Upper Envelope zu machen (oder
+ umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren.
+ \subsubsection{Monotonic}
+ \begin{methods}
+ \method{add}{add line $mx + b$, $m$ is decreasing}{1}
+ \method{query}{minimum value at $x$, $x$ is increasing}{1}
+ \end{methods}
+ \sourcecode{datastructures/monotonicConvexHull.cpp}
+ \subsubsection{Dynamic}
+ \begin{methods}
+ \method{add}{add line $mx + b$}{\log(n)}
+ \method{query}{minimum value at $x$}{\log(n)}
+ \end{methods}
+ \sourcecode{datastructures/dynamicConvexHull.cpp}
+ \subsubsection{Li Chao Tree}
+ Every pair of functions has at most one intersection.
+
+ \begin{methods}
+ \method{insert}{add function}{\log(|xs|)}
+ \method{query}{minimum value at $x$, $x \in xs$}{\log(|xs|)}
+ \end{methods}
+ \sourcecode{datastructures/lichao.cpp}
\end{algorithm}
\begin{algorithm}{Policy Based Data Structures}
- \textbf{Wichtig:} Verwende \code{p.swap(p2)} anstatt \code{swap(p, p2)}!
- \sourcecode{datastructures/stlPriorityQueue.cpp}
- \columnbreak
\sourcecode{datastructures/pbds.cpp}
\end{algorithm}
-\begin{algorithm}{Lower/Upper Envelope (Convex Hull Optimization)}
- Um aus einem lower envelope einen upper envelope zu machen (oder umgekehrt), einfach beim Einfügen der Geraden $m$ und $b$ negieren.
- \sourcecode{datastructures/monotonicConvexHull.cpp}
- \sourcecode{datastructures/dynamicConvexHull.cpp}
-\end{algorithm}
-
\begin{algorithm}{Union-Find}
\begin{methods}
\method{init}{legt $n$ einzelne Unions an}{n}
\method{findSet}{findet den Repräsentanten}{\log(n)}
\method{unionSets}{vereint 2 Mengen}{\log(n)}
+ \method{size}{zählt Elemente in Menge, die $a$ enthält}{\log(n)}
\method{m\*findSet + n\*unionSets}{Folge von Befehlen}{n+m\*\alpha(n)}
\end{methods}
\sourcecode{datastructures/unionFind.cpp}
diff --git a/content/datastructures/dynamicConvexHull.cpp b/content/datastructures/dynamicConvexHull.cpp
index 63e0e13..36ef6f5 100644
--- a/content/datastructures/dynamicConvexHull.cpp
+++ b/content/datastructures/dynamicConvexHull.cpp
@@ -1,15 +1,15 @@
struct Line {
mutable ll m, c, p;
- bool operator<(const Line& o) const {return m < o.m;}
- bool operator<(ll x) const {return p < x;}
+ bool operator<(const Line& o) const { return m < o.m; }
+ bool operator<(ll x) const { return p < x; }
};
struct HullDynamic : multiset<Line, less<>> { // max über Geraden
// (for doubles, use INF = 1/.0, div(a,c) = a/c)
- ll div(ll a, ll c) {return a / c - ((a ^ c) < 0 && a % c);}
+ ll div(ll a, ll c) { return a / c - ((a ^ c) < 0 && a % c); }
bool isect(iterator x, iterator y) {
- if (y == end()) {x->p = INF; return false;}
+ if (y == end()) { x->p = INF; return false; }
if (x->m == y->m) x->p = x->c > y->c ? INF : -INF;
else x->p = div(y->c - x->c, x->m - y->m);
return x->p >= y->p;
@@ -19,13 +19,11 @@ struct HullDynamic : multiset<Line, less<>> { // max über Geraden
auto x = insert({m, c, 0});
while (isect(x, next(x))) erase(next(x));
if (x != begin()) {
- x--;
- if (isect(x, next(x))) {
- erase(next(x));
- isect(x, next(x));
- }}
+ --x;
+ while (isect(x, next(x))) erase(next(x));
+ }
while (x != begin() && prev(x)->p >= x->p) {
- x--;
+ --x;
isect(x, erase(next(x)));
}}
diff --git a/content/datastructures/fenwickTree.cpp b/content/datastructures/fenwickTree.cpp
index eb5cd73..7013613 100644
--- a/content/datastructures/fenwickTree.cpp
+++ b/content/datastructures/fenwickTree.cpp
@@ -1,7 +1,7 @@
vector<ll> tree;
void update(int i, ll val) {
- for (i++; i < sz(tree); i += i & -i) tree[i] += val;
+ for (i++; i < ssize(tree); i += i & -i) tree[i] += val;
}
void init(int n) {
@@ -10,6 +10,6 @@ void init(int n) {
ll prefix_sum(int i) {
ll sum = 0;
- for (i++; i > 0; i -= i & -i) sum += tree[i];
+ for (; i > 0; i &= i-1) sum += tree[i];
return sum;
}
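
With the dropped i++, prefix_sum(i) now sums the half-open prefix [0, i), matching the updated datastructures.tex line. A usage sketch assuming the three free functions from this file and the global tree vector; range sums come out as a difference of two prefixes:

  init(8);                                // tree over indices 0..7, all zero
  update(2, 5);                           // a[2] += 5
  update(5, 3);                           // a[5] += 3
  ll s = prefix_sum(5);                   // sum of a[0..4] = 5 (half-open)
  ll r = prefix_sum(6) - prefix_sum(2);   // sum of a[2..5] = 8
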
diff --git a/content/datastructures/fenwickTree2.cpp b/content/datastructures/fenwickTree2.cpp
index 9384e3c..7fcdbb9 100644
--- a/content/datastructures/fenwickTree2.cpp
+++ b/content/datastructures/fenwickTree2.cpp
@@ -1,21 +1,21 @@
vector<ll> add, mul;
void update(int l, int r, ll val) {
- for (int tl = l + 1; tl < sz(add); tl += tl & -tl)
+ for (int tl = l + 1; tl < ssize(add); tl += tl & -tl)
add[tl] += val, mul[tl] -= val * l;
- for (int tr = r + 1; tr < sz(add); tr += tr & -tr)
+ for (int tr = r + 1; tr < ssize(add); tr += tr & -tr)
add[tr] -= val, mul[tr] += val * r;
}
-void init(vector<ll>& v) {
- mul.assign(sz(v) + 1, 0);
- add.assign(sz(v) + 1, 0);
- for(int i = 0; i < sz(v); i++) update(i, i + 1, v[i]);
+void init(vector<ll> &v) {
+ mul.assign(size(v) + 1, 0);
+ add.assign(size(v) + 1, 0);
+ for(int i = 0; i < ssize(v); i++) update(i, i + 1, v[i]);
}
ll prefix_sum(int i) {
- ll res = 0; i++;
- for (int ti = i; ti > 0; ti -= ti & -ti)
+ ll res = 0;
+ for (int ti = i; ti > 0; ti &= ti-1)
res += add[ti] * i + mul[ti];
return res;
}
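
The add/mul pair implements range update with prefix queries: an update of val on [l, r) contributes val * (min(i, r) - l) to a prefix sum ending at i > l, which the tree stores as a slope part (add) and a constant part (mul), hence the two loops in update. As in the plain tree, prefix_sum(i) is now half-open. A usage sketch assuming the functions from this file:

  vector<ll> v = {1, 2, 3, 4};
  init(v);                                // point values via update(i, i+1, v[i])
  update(1, 3, 10);                       // add 10 to indices 1 and 2
  ll s = prefix_sum(3);                   // 1 + 12 + 13 = 26
  ll r = prefix_sum(3) - prefix_sum(1);   // 12 + 13 = 25
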
diff --git a/content/datastructures/lazyPropagation.cpp b/content/datastructures/lazyPropagation.cpp
index ab91364..a5be822 100644
--- a/content/datastructures/lazyPropagation.cpp
+++ b/content/datastructures/lazyPropagation.cpp
@@ -1,23 +1,22 @@
struct SegTree {
using T = ll; using U = ll;
- int n;
static constexpr T E = 0; // Neutral element for combine
- static constexpr U UF = INF; // Unused value by updates
- vector<T> tree;
+ static constexpr U UF = 1e18; // Unused value by updates
+ int n;
+ vector<T> tree; vector<U> lazy;
int h;
- vector<U> lazy;
- vector<int> k; // size of segments (optional)
+ vector<ll> k; // size of segments (optional)
- SegTree(const vector<T>& a) : n(sz(a) + 1), tree(2 * n, E),
+ SegTree(const vector<T>& a) : n(ssize(a) + 1), tree(2 * n, E),
//SegTree(int size, T def = E) : n(size + 1), tree(2 * n, def),
- h(__lg(2 * n)), lazy(n, UF), k(2 * n, 1) {
- copy(all(a), tree.begin() + n);
+ lazy(n, UF), h(__lg(2 * n)), k(2 * n, 1) {
+ ranges::copy(a, tree.begin() + n);
for (int i = n - 1; i > 0; i--) {
k[i] = 2 * k[2 * i];
tree[i] = comb(tree[2 * i], tree[2 * i + 1]);
}}
- T comb(T a, T b) {return a + b;} // Modify this + E
+ T comb(T a, T b) { return a + b; } // Modify this + E
void apply(int i, U val) { // And this + UF
tree[i] = val * k[i];
@@ -44,17 +43,17 @@ struct SegTree {
void update(int l, int r, U val) {
l += n, r += n;
int l0 = l, r0 = r;
- push(l0), push(r0 - 1);
+ push(l0), push(r0);
for (; l < r; l /= 2, r /= 2) {
if (l&1) apply(l++, val);
if (r&1) apply(--r, val);
}
- build(l0), build(r0 - 1);
+ build(l0), build(r0);
}
T query(int l, int r) {
l += n, r += n;
- push(l), push(r - 1);
+ push(l), push(r);
T resL = E, resR = E;
for (; l < r; l /= 2, r /= 2) {
if (l&1) resL = comb(resL, tree[l++]);
@@ -64,21 +63,23 @@ struct SegTree {
}
// Optional:
- int lower_bound(int l, int r, T x) {
+ int binary_search(int l, int r, auto &&f) {
+ if (f(E)) return l;
l += n, r += n;
- push(l), push(r - 1);
+ push(l), push(r);
int a[64] = {}, lp = 0, rp = 64;
for (; l < r; l /= 2, r /= 2) {
if (l&1) a[lp++] = l++;
if (r&1) a[--rp] = --r;
}
- for (int i : a) if (i != 0 && tree[i] >= x) { // Modify this
+ T x = E, y = x;
+ for (int i : a) if (i != 0 && f(x = comb(y = x, tree[i]))) {
while (i < n) {
push_down(i);
- if (tree[2 * i] >= x) i = 2 * i; // And this
- else i = 2 * i + 1;
+ if (f(x = comb(y, tree[2*i]))) i = 2 * i;
+ else i = 2 * i + 1, y = x;
}
- return i - n;
+ return i - n + 1;
}
return -1;
}
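
The generic binary_search replaces the old lower_bound: following the updated .tex description, it returns the smallest x in [l, r] with f(query(l, x)) true (f must be monotone in x), or -1 if no such x exists. A usage sketch with the default sum-combine and assignment-update configuration of this file:

  vector<ll> a = {2, 1, 4, 3};
  SegTree st(a);
  st.update(1, 3, 5);                     // assign 5 on [1, 3)
  ll tot = st.query(0, 4);                // 2 + 5 + 5 + 3 = 15
  // smallest x in [0, 4] with sum over [0, x) >= 10  ->  3
  int x = st.binary_search(0, 4, [](ll s) { return s >= 10; });
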
diff --git a/content/datastructures/lichao.cpp b/content/datastructures/lichao.cpp
index 1318ca7..bdbf5f9 100644
--- a/content/datastructures/lichao.cpp
+++ b/content/datastructures/lichao.cpp
@@ -1,9 +1,10 @@
vector<ll> xs; // IMPORTANT: Initialize before constructing!
-int findX(int i) {return lower_bound(all(xs), i) - begin(xs);}
+int findX(int i) {
+ return ranges::lower_bound(xs, i) - begin(xs); }
-struct Fun { // Default: Linear function. Change as needed.
+struct Fun { // Default: Linear function. Change as needed.
ll m, c;
- ll operator()(int x) {return m*xs[x] + c;}
+ ll operator()(int x) { return m*xs[x] + c; }
};
// Default: Computes min. Change lines with comment for max.
@@ -11,18 +12,18 @@ struct Lichao {
static constexpr Fun id = {0, INF}; // {0, -INF}
int n, cap;
vector<Fun> seg;
- Lichao() : n(sz(xs)), cap(2 << __lg(n)), seg(2 * cap, id) {}
-
+ Lichao() : n(ssize(xs)), cap(2 << __lg(n)), seg(2 * cap, id) {}
+
void _insert(Fun f, int l, int r, int i) {
while (i < 2 * cap) {
int m = (l+r)/2;
- if (m >= n) {r = m; i = 2*i; continue;}
+ if (m >= n) { r = m; i = 2*i; continue; }
Fun &g = seg[i];
if (f(m) < g(m)) swap(f, g); // >
if (f(l) < g(l)) r = m, i = 2*i; // >
else l = m, i = 2*i+1;
}}
- void insert(Fun f) {_insert(f, 0, cap, 1);}
+ void insert(Fun f) { _insert(f, 0, cap, 1); }
void _segmentInsert(Fun f, int l, int r, int a, int b, int i) {
if (l <= a && b <= r) _insert(f, a, b, i);
@@ -42,5 +43,5 @@ struct Lichao {
}
return ans;
}
- ll query(ll x) {return _query(findX(x));}
+ ll query(ll x) { return _query(findX(x)); }
};
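
Usage sketch for the Li Chao tree: per the comment at the top of the file, xs must hold the sorted query x-coordinates before the structure is constructed; INF is assumed to come from the template, and the default configuration minimizes:

  xs = {-5, 0, 3, 7};                     // sorted query positions, set first!
  Lichao lc;
  lc.insert({2, 1});                      // line 2x + 1
  lc.insert({-1, 4});                     // line -x + 4
  ll best = lc.query(3);                  // min(2*3 + 1, -3 + 4) = 1
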
diff --git a/content/datastructures/monotonicConvexHull.cpp b/content/datastructures/monotonicConvexHull.cpp
index f1721ae..295acc4 100644
--- a/content/datastructures/monotonicConvexHull.cpp
+++ b/content/datastructures/monotonicConvexHull.cpp
@@ -1,27 +1,25 @@
-// Min über Geraden mit MONOTONEN Inserts UND Queries. Jede neue
-// Gerade hat kleineres pair(m, c) als alle vorherigen.
-struct Line {
- ll m, c;
- ll operator()(ll x) {return m*x+c;}
-};
+struct Envelope {
+ struct Line {
+ ll m, b;
+ ll operator()(ll x) { return m*x+b; }
+ };
-vector<Line> ls;
-ll ptr = 0;
+ vector<Line> ls;
+ int ptr = 0;
-bool bad(Line l1, Line l2, Line l3) {
- return (l3.c-l1.c)*(l1.m-l2.m) < (l2.c-l1.c)*(l1.m-l3.m);
-}
+ static bool bad(Line l1, Line l2, Line l3) {
+ return (l3.b-l1.b)*(l1.m-l2.m) < (l2.b-l1.b)*(l1.m-l3.m);
+ }
-void add(ll m, ll c) { // m fallend, Laufzeit O(1) amortisiert
- while (sz(ls) > 1 && bad(ls.end()[-2], ls.end()[-1], {m, c})) {
- ls.pop_back();
+ void add(ll m, ll b) {
+ while (ssize(ls) > 1
+ && bad(ls.end()[-2], ls.back(), {m,b})) ls.pop_back();
+ ls.push_back({m, b});
+ ptr = min(ptr, (int)ssize(ls) - 1);
}
- ls.push_back({m, c});
- ptr = min(ptr, sz(ls) - 1);
-}
-ll query(ll x) { // x >= letztes x, Laufzeit: O(1) amortisiert
- ptr = min(ptr, sz(ls) - 1);
- while (ptr + 1 < sz(ls) && ls[ptr + 1](x) < ls[ptr](x)) ptr++;
- return ls[ptr](x);
-} \ No newline at end of file
+ ll query(ll x) {
+ while (ptr < ssize(ls)-1 && ls[ptr+1](x) < ls[ptr](x)) ptr++;
+ return ls[ptr](x);
+ }
+};
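
Usage sketch for the new Envelope (minimum over lines, as documented in the .tex above): slopes must be added in non-increasing order and queries must come with non-decreasing x, both amortized O(1):

  Envelope env;
  env.add(3, 0);                          // slopes non-increasing: 3, 1, -2
  env.add(1, 2);
  env.add(-2, 10);
  ll a = env.query(1);                    // min(3, 3, 8)  = 3
  ll b = env.query(4);                    // min(12, 6, 2) = 2 (x non-decreasing)
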
diff --git a/content/datastructures/pbds.cpp b/content/datastructures/pbds.cpp
index de0ace6..734bf91 100644
--- a/content/datastructures/pbds.cpp
+++ b/content/datastructures/pbds.cpp
@@ -1,14 +1,22 @@
+#include <ext/pb_ds/priority_queue.hpp>
+template<typename T>
+using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>>
+auto it = pq.push(5); // O(1)
+pq.modify(it, 6); // O(log n)
+pq.erase(it); // O(log n)
+pq.join(pq2); // O(1)
+pq.swap(pq2); // O(1)
+
#include <ext/pb_ds/assoc_container.hpp>
using namespace __gnu_pbds;
template<typename T>
using Tree = tree<T, null_type, less<T>, rb_tree_tag,
tree_order_statistics_node_update>;
-// T.order_of_key(x): number of elements strictly less than x
-// *T.find_by_order(k): k-th element
+T.order_of_key(x); // number of elements strictly less than x
+auto it = T.find_by_order(k); // k-th element
constexpr uint64_t RNG = ll(2e18 * acos(-1)) | 199; // random odd
-template<typename T>
-struct chash {
+template<typename T> struct chash {
size_t operator()(T o) const {
return __builtin_bswap64(hash<T>()(o) * RNG);
}};
diff --git a/content/datastructures/persistent.cpp b/content/datastructures/persistent.cpp
index f26680d..ed2f891 100644
--- a/content/datastructures/persistent.cpp
+++ b/content/datastructures/persistent.cpp
@@ -1,18 +1,18 @@
-template<typename T>
-struct persistent {
- int& time;
- vector<pair<int, T>> data;
-
- persistent(int& time, T value = {})
- : time(time), data(1, {2*time, value}) {}
-
- T get(int t) {
- return prev(upper_bound(all(data),pair{2*t+1, T{}}))->second;
- }
-
- int set(T value) {
- time++;
- data.push_back({2*time, value});
- return time;
- }
-};
+template<typename T>
+struct persistent {
+ int& time;
+ vector<pair<int, T>> data;
+
+ persistent(int& time, T value = {})
+ : time(time), data(1, {2*time, value}) {}
+
+ T get(int t) {
+ return ranges::upper_bound(data,pair{2*t+1, T{}})[-1].second;
+ }
+
+ int set(T value) {
+ time++;
+ data.push_back({2*time, value});
+ return time;
+ }
+};
diff --git a/content/datastructures/persistentArray.cpp b/content/datastructures/persistentArray.cpp
index 8326700..903bd0e 100644
--- a/content/datastructures/persistentArray.cpp
+++ b/content/datastructures/persistentArray.cpp
@@ -1,24 +1,24 @@
-template<typename T>
-struct persistentArray {
- int time;
- vector<persistent<T>> data;
- vector<pair<int, int>> mods;
-
- persistentArray(int n, T value = {})
- : time(0), data(n, {time, value}) {}
-
- T get(int p, int t) {return data[p].get(t);}
-
- int set(int p, T value) {
- mods.push_back({p, data[p].set(value)});
- return mods.back().second;
- }
-
- void reset(int t) {
- while (!mods.empty() && mods.back().second > t) {
- data[mods.back().first].data.pop_back();
- mods.pop_back();
- }
- time = t;
- }
-};
+template<typename T>
+struct persistentArray {
+ int time;
+ vector<persistent<T>> data;
+ vector<pair<int, int>> mods;
+
+ persistentArray(int n, T value = {})
+ : time(0), data(n, {time, value}) {}
+
+ T get(int p, int t) { return data[p].get(t); }
+
+ int set(int p, T value) {
+ mods.push_back({p, data[p].set(value)});
+ return mods.back().second;
+ }
+
+ void reset(int t) {
+ while (!mods.empty() && mods.back().second > t) {
+ data[mods.back().first].data.pop_back();
+ mods.pop_back();
+ }
+ time = t;
+ }
+};
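
A usage sketch for the pair of templates (persistent above, persistentArray here): set returns a timestamp, get reads any cell at any earlier timestamp, and reset rolls the whole array back to a timestamp:

  persistentArray<int> arr(3, 0);         // three cells, all 0 at time 0
  int t1 = arr.set(0, 7);                 // version t1
  int t2 = arr.set(2, 9);                 // version t2
  int a = arr.get(0, t1);                 // 7
  int b = arr.get(2, t1);                 // still 0 at time t1
  arr.reset(t1);                          // drop everything newer than t1
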
diff --git a/content/datastructures/segmentTree.cpp b/content/datastructures/segmentTree.cpp
index 6b69d0b..1fbf886 100644
--- a/content/datastructures/segmentTree.cpp
+++ b/content/datastructures/segmentTree.cpp
@@ -4,14 +4,15 @@ struct SegTree {
vector<T> tree;
static constexpr T E = 0; // Neutral element for combine
- SegTree(vector<T>& a) : n(sz(a)), tree(2 * n) {
- //SegTree(int size, T val = E) : n(size), tree(2 * n, val) {
- copy(all(a), tree.begin() + n);
+ SegTree(vector<T>& a) : n(ssize(a)), tree(2 * n, E) {
+ ranges::copy(a, tree.begin() + n);
+ //SegTree(int size, T val = E) : n(size), tree(2 * n, E) {
+ // fill(tree.begin() + n, tree.end(), val);
for (int i = n - 1; i > 0; i--) { // remove for range update
tree[i] = comb(tree[2 * i], tree[2 * i + 1]);
}}
- T comb(T a, T b) {return a + b;} // modify this + neutral
+ T comb(T a, T b) { return a + b; } // modify this + neutral
void update(int i, T val) {
tree[i += n] = val; // apply update code
diff --git a/content/datastructures/sparseTable.cpp b/content/datastructures/sparseTable.cpp
index b3f946e..5455ef5 100644
--- a/content/datastructures/sparseTable.cpp
+++ b/content/datastructures/sparseTable.cpp
@@ -6,17 +6,17 @@ struct SparseTable {
return a[lidx] <= a[ridx] ? lidx : ridx;
}
- void init(vector<ll>* vec) {
- int n = sz(*vec);
- a = vec->data();
+ void init(vector<ll> &vec) {
+ int n = ssize(vec);
+ a = vec.data();
st.assign(__lg(n) + 1, vector<int>(n));
- iota(all(st[0]), 0);
+ iota(begin(st[0]), end(st[0]), 0);
for (int j = 0; (2 << j) <= n; j++) {
for (int i = 0; i + (2 << j) <= n; i++) {
st[j + 1][i] = better(st[j][i] , st[j][i + (1 << j)]);
}}}
- int queryIdempotent(int l, int r) {
+ int query(int l, int r) {
if (r <= l) return -1;
int j = __lg(r - l); //31 - builtin_clz(r - l);
return better(st[j][l] , st[j][r - (1 << j)]);
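
init now takes a reference instead of a pointer and the query was renamed from queryIdempotent to query; it returns the index of the minimum on the half-open range [l, r), or -1 if the range is empty. A usage sketch:

  vector<ll> v = {5, 2, 4, 1, 3};
  SparseTable st;
  st.init(v);                             // v must outlive st (it keeps v.data())
  int i = st.query(1, 4);                 // index of min of v[1..3] -> 3
  int j = st.query(0, 3);                 // -> 1
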
diff --git a/content/datastructures/sparseTableDisjoint.cpp b/content/datastructures/sparseTableDisjoint.cpp
index 55165d4..bcf6b2e 100644
--- a/content/datastructures/sparseTableDisjoint.cpp
+++ b/content/datastructures/sparseTableDisjoint.cpp
@@ -7,16 +7,16 @@ struct DisjointST {
return x + y;
}
- void init(vector<ll>* vec) {
- int n = sz(*vec);
- a = vec->data();
+ void init(vector<ll> &vec) {
+ int n = ssize(vec);
+ a = vec.data();
dst.assign(__lg(n) + 1, vector<ll>(n + 1, neutral));
for (int h = 0, l = 1; l <= n; h++, l *= 2) {
for (int c = l; c < n + l; c += 2 * l) {
for (int i = c; i < min(n, c + l); i++)
- dst[h][i + 1] = combine(dst[h][i], vec->at(i));
+ dst[h][i + 1] = combine(dst[h][i], vec[i]);
for (int i = min(n, c); i > c - l; i--)
- dst[h][i - 1] = combine(vec->at(i - 1), dst[h][i]);
+ dst[h][i - 1] = combine(vec[i - 1], dst[h][i]);
}}}
ll query(int l, int r) {
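
Same interface change as the sparse table above (reference instead of pointer). Per the new .tex entry this structure answers an arbitrary associative aggregate, here sums, on half-open ranges in O(1) after O(n log n) init. A sketch:

  vector<ll> v = {1, 2, 3, 4, 5};
  DisjointST dst;
  dst.init(v);                            // v must outlive dst
  ll s = dst.query(1, 4);                 // 2 + 3 + 4 = 9
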
diff --git a/content/datastructures/stlHashMap.cpp b/content/datastructures/stlHashMap.cpp
deleted file mode 100644
index b107dde..0000000
--- a/content/datastructures/stlHashMap.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#include <ext/pb_ds/assoc_container.hpp>
-using namespace __gnu_pbds;
-
-template<typename T>
-struct betterHash {
- size_t operator()(T o) const {
- size_t h = hash<T>()(o) ^ 42394245; //random value
- h = ((h >> 16) ^ h) * 0x45d9f3b;
- h = ((h >> 16) ^ h) * 0x45d9f3b;
- h = ((h >> 16) ^ h);
- return h;
-}};
-
-template<typename K, typename V, typename H = betterHash<K>>
-using hashMap = gp_hash_table<K, V, H>;
-template<typename K, typename H = betterHash<K>>
-using hashSet = gp_hash_table<K, null_type, H>;
diff --git a/content/datastructures/stlPriorityQueue.cpp b/content/datastructures/stlPriorityQueue.cpp
deleted file mode 100644
index 32b2455..0000000
--- a/content/datastructures/stlPriorityQueue.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-#include <ext/pb_ds/priority_queue.hpp>
-template<typename T>
-using pQueue = __gnu_pbds::priority_queue<T>; //<T, greater<T>>
-
-auto it = pq.push(5);
-pq.modify(it, 6);
-pq.join(pq2);
-// push, join are O(1), pop, modify, erase O(log n) amortized
diff --git a/content/datastructures/stlTree.cpp b/content/datastructures/stlTree.cpp
deleted file mode 100644
index fbb68b9..0000000
--- a/content/datastructures/stlTree.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-#include <ext/pb_ds/assoc_container.hpp>
-#include <ext/pb_ds/tree_policy.hpp>
-using namespace std; using namespace __gnu_pbds;
-template<typename T>
-using Tree = tree<T, null_type, less<T>, rb_tree_tag,
- tree_order_statistics_node_update>;
-
-int main() {
- Tree<int> X;
- for (int i : {1, 2, 4, 8, 16}) X.insert(i);
- *X.find_by_order(3); // => 8
- X.order_of_key(10); // => 4 = min i, mit X[i] >= 10
-}
diff --git a/content/datastructures/treap.cpp b/content/datastructures/treap.cpp
index c5a60e9..bddfdb4 100644
--- a/content/datastructures/treap.cpp
+++ b/content/datastructures/treap.cpp
@@ -66,7 +66,7 @@ struct Treap {
void insert(int i, ll val) { // and i = val
auto [left, right] = split(root, i);
treap.emplace_back(val);
- left = merge(left, sz(treap) - 1);
+ left = merge(left, ssize(treap) - 1);
root = merge(left, right);
}
diff --git a/content/datastructures/waveletTree.cpp b/content/datastructures/waveletTree.cpp
index 090cdb2..55167b6 100644
--- a/content/datastructures/waveletTree.cpp
+++ b/content/datastructures/waveletTree.cpp
@@ -1,25 +1,20 @@
struct WaveletTree {
- using it = vector<ll>::iterator;
- WaveletTree *ln = nullptr, *rn = nullptr;
+ unique_ptr<WaveletTree> ln, rn;
vector<int> b = {0};
ll lo, hi;
- WaveletTree(vector<ll> in) : WaveletTree(all(in)) {}
-
- WaveletTree(it from, it to) : // call above one
- lo(*min_element(from, to)), hi(*max_element(from, to) + 1) {
+ WaveletTree(auto in) : lo(*ranges::min_element(in)),
+ hi(*ranges::max_element(in) + 1) {
ll mid = (lo + hi) / 2;
- auto f = [&](ll x) {return x < mid;};
- for (it c = from; c != to; c++) {
- b.push_back(b.back() + f(*c));
- }
+ auto f = [&](ll x) { return x < mid; };
+ for (ll x: in) b.push_back(b.back() + f(x));
if (lo + 1 >= hi) return;
- it pivot = stable_partition(from, to, f);
- ln = new WaveletTree(from, pivot);
- rn = new WaveletTree(pivot, to);
+ auto right = ranges::stable_partition(in, f);
+ ln = make_unique<WaveletTree>(
+ ranges::subrange(begin(in), begin(right)));
+ rn = make_unique<WaveletTree>(right);
}
- // kth element in sort[l, r) all 0-indexed
ll kth(int l, int r, int k) {
if (k < 0 || l + k >= r) return -1;
if (lo + 1 >= hi) return lo;
@@ -28,13 +23,10 @@ struct WaveletTree {
else return rn->kth(l-b[l], r-b[r], k-inLeft);
}
- // count elements in[l, r) smaller than k
int countSmaller(int l, int r, ll k) {
if (l >= r || k <= lo) return 0;
if (hi <= k) return r - l;
return ln->countSmaller(b[l], b[r], k) +
rn->countSmaller(l-b[l], r-b[r], k);
}
-
- ~WaveletTree() {delete ln; delete rn;}
};
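
The constructor now takes any range and ownership moved to unique_ptr, so the manual destructor is gone. The dropped comments still describe the interface: kth(l, r, k) is the k-th smallest (0-indexed) of the sorted slice [l, r), countSmaller(l, r, k) counts elements of [l, r) below k; indices refer to the original input order. A sketch:

  vector<ll> v = {5, 2, 7, 2, 9};
  WaveletTree wt(v);                      // top-level call copies its argument
  ll x = wt.kth(0, 4, 1);                 // 2nd smallest of {5,2,7,2} -> 2
  int c = wt.countSmaller(1, 5, 7);       // of {2,7,2,9}: elements < 7 -> 2
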
diff --git a/content/geometry/antipodalPoints.cpp b/content/geometry/antipodalPoints.cpp
index 110cc74..b34b175 100644
--- a/content/geometry/antipodalPoints.cpp
+++ b/content/geometry/antipodalPoints.cpp
@@ -1,12 +1,12 @@
vector<pair<int, int>> antipodalPoints(vector<pt>& h) {
- if (sz(h) < 2) return {};
+ if (ssize(h) < 2) return {};
vector<pair<int, int>> result;
for (int i = 0, j = 1; i < j; i++) {
while (true) {
result.push_back({i, j});
- if (cross(h[(i + 1) % sz(h)] - h[i],
- h[(j + 1) % sz(h)] - h[j]) <= 0) break;
- j = (j + 1) % sz(h);
+ if (cross(h[(i + 1) % ssize(h)] - h[i],
+ h[(j + 1) % ssize(h)] - h[j]) <= 0) break;
+ j = (j + 1) % ssize(h);
}}
return result;
}
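
antipodalPoints expects a convex hull in counter-clockwise order without the closing duplicate (indices wrap modulo ssize(h)); the hull diameter is then the maximum distance over the returned candidate pairs. A sketch with a 4x3 rectangle (pt = complex<ll> as elsewhere in the geometry chapter):

  vector<pt> h = {{0,0}, {4,0}, {4,3}, {0,3}};  // ccw, h[0] != h.back()
  ll best = 0;                                  // squared diameter
  for (auto [i, j] : antipodalPoints(h))
    best = max(best, norm(h[i] - h[j]));        // here 4*4 + 3*3 = 25
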
diff --git a/content/geometry/circle.cpp b/content/geometry/circle.cpp
index 6789c52..155b55c 100644
--- a/content/geometry/circle.cpp
+++ b/content/geometry/circle.cpp
@@ -22,7 +22,7 @@ vector<pt> circleRayIntersection(pt center, double r,
double c = norm(orig - center) - r * r;
double discr = b * b - 4 * a * c;
if (discr >= 0) {
- //t in [0, 1] => schnitt mit Segment [orig, orig + dir]
+ //t in [0, 1] => Schnitt mit Segment [orig, orig + dir]
double t1 = -(b + sqrt(discr)) / (2 * a);
double t2 = -(b - sqrt(discr)) / (2 * a);
if (t1 >= 0) result.push_back(t1 * dir + orig);
diff --git a/content/geometry/closestPair.cpp b/content/geometry/closestPair.cpp
index 9b115f3..bbefa67 100644
--- a/content/geometry/closestPair.cpp
+++ b/content/geometry/closestPair.cpp
@@ -4,12 +4,11 @@ ll rec(vector<pt>::iterator a, int l, int r) {
ll midx = a[m].real();
ll ans = min(rec(a, l, m), rec(a, m, r));
- inplace_merge(a+l, a+m, a+r, [](const pt& x, const pt& y) {
- return x.imag() < y.imag();
- });
+ ranges::inplace_merge(a+l, a+m, a+r, {},
+ [](pt x) { return imag(x); });
pt tmp[8];
- fill(all(tmp), a[l]);
+ ranges::fill(tmp, a[l]);
for (int i = l + 1, next = 0; i < r; i++) {
if (ll x = a[i].real() - midx; x * x < ans) {
for (pt& p : tmp) ans = min(ans, norm(p - a[i]));
@@ -19,9 +18,7 @@ ll rec(vector<pt>::iterator a, int l, int r) {
return ans;
}
-ll shortestDist(vector<pt> a) { // sz(pts) > 1
- sort(all(a), [](const pt& x, const pt& y) {
- return x.real() < y.real();
- });
- return rec(a.begin(), 0, sz(a));
+ll shortestDist(vector<pt> a) { // size(pts) > 1
+ ranges::sort(a, {}, [](pt x) { return real(x); });
+ return rec(a.begin(), 0, ssize(a));
}
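
shortestDist sorts the points by x internally and returns the squared distance (norm of a complex difference) of the closest pair; at least two points are required. A sketch with pt = complex<ll>:

  vector<pt> a = {{0,0}, {5,5}, {1,2}, {9,9}};
  ll d2 = shortestDist(a);                // closest pair (0,0)-(1,2): 1 + 4 = 5
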
diff --git a/content/geometry/convexHull.cpp b/content/geometry/convexHull.cpp
index 1173924..03c6343 100644
--- a/content/geometry/convexHull.cpp
+++ b/content/geometry/convexHull.cpp
@@ -1,18 +1,16 @@
vector<pt> convexHull(vector<pt> pts){
- sort(all(pts), [](const pt& a, const pt& b){
- return real(a) == real(b) ? imag(a) < imag(b)
- : real(a) < real(b);
- });
- pts.erase(unique(all(pts)), pts.end());
+ ranges::sort(pts, {},
+ [](pt x) { return pair{real(x), imag(x)}; });
+ pts.erase(begin(ranges::unique(pts)), end(pts));
int k = 0;
- vector<pt> h(2 * sz(pts));
- auto half = [&](auto begin, auto end, int t) {
- for (auto it = begin; it != end; it++) {
- while (k > t && cross(h[k-2], h[k-1], *it) <= 0) k--;
- h[k++] = *it;
+ vector<pt> h(2 * ssize(pts));
+ auto half = [&](auto &&v, int t) {
+ for (auto x: v) {
+ while (k > t && cross(h[k-2], h[k-1], x) <= 0) k--;
+ h[k++] = x;
}};
- half(all(pts), 1); // Untere Hülle.
- half(next(pts.rbegin()), pts.rend(), k); // Obere Hülle.
+ half(pts, 1); // Untere Hülle.
+ half(pts | views::reverse | views::drop(1), k); // Obere Hülle
h.resize(k);
return h;
}
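
Usage sketch for the rewritten hull (pt = complex<ll>): duplicates and interior points are removed, and, as the two half passes are written, the starting point appears once more at the end of the result, matching the closed-polygon convention poly[0] == poly.back() used in polygon.cpp:

  vector<pt> pts = {{0,0}, {2,0}, {1,1}, {2,2}, {0,2}, {1,1}};
  auto hull = convexHull(pts);
  // -> (0,0) (2,0) (2,2) (0,2) (0,0): ccw, interior point (1,1)
  //    and the duplicate removed, first point repeated at the end
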
diff --git a/content/geometry/delaunay.cpp b/content/geometry/delaunay.cpp
index c813892..9ae9061 100644
--- a/content/geometry/delaunay.cpp
+++ b/content/geometry/delaunay.cpp
@@ -3,7 +3,8 @@ using pt = complex<lll>;
constexpr pt INF_PT = pt(2e18, 2e18);
-bool circ(pt p, pt a, pt b, pt c) {// p in circle(A,B,C), ABC must be ccw
+// p in circle(A,B,C), ABC must be ccw
+bool circ(pt p, pt a, pt b, pt c) {
return imag((c-b)*conj(p-c)*(a-p)*conj(b-a)) < 0;
}
@@ -12,10 +13,10 @@ struct QuadEdge {
QuadEdge* onext = nullptr;
pt orig = INF_PT;
bool used = false;
- QuadEdge* rev() const {return rot->rot;}
- QuadEdge* lnext() const {return rot->rev()->onext->rot;}
- QuadEdge* oprev() const {return rot->onext->rot;}
- pt dest() const {return rev()->orig;}
+ QuadEdge* rev() const { return rot->rot; }
+ QuadEdge* lnext() const { return rot->rev()->onext->rot; }
+ QuadEdge* oprev() const { return rot->onext->rot; }
+ pt dest() const { return rev()->orig; }
};
deque<QuadEdge> edgeData;
@@ -98,12 +99,10 @@ pair<QuadEdge*, QuadEdge*> rec(IT l, IT r) {
}
vector<pt> delaunay(vector<pt> pts) {
- if (sz(pts) <= 2) return {};
- sort(all(pts), [](const pt& a, const pt& b) {
- if (real(a) != real(b)) return real(a) < real(b);
- return imag(a) < imag(b);
- });
- QuadEdge* r = rec(all(pts)).first;
+ if (ssize(pts) <= 2) return {};
+ ranges::sort(pts, {},
+ [](pt x) { return pair{real(x), imag(x)}; });
+ QuadEdge* r = rec(begin(pts), end(pts)).first;
vector<QuadEdge*> edges = {r};
while (cross(r->onext->dest(), r->dest(), r->orig) < 0) r = r->onext;
auto add = [&](QuadEdge* e){
@@ -117,7 +116,7 @@ vector<pt> delaunay(vector<pt> pts) {
};
add(r);
pts.clear();
- for (int i = 0; i < sz(edges); i++) {
+ for (int i = 0; i < ssize(edges); i++) {
if (!edges[i]->used) add(edges[i]);
}
return pts;
diff --git a/content/geometry/formulas.cpp b/content/geometry/formulas.cpp
index 5d4e10d..b339451 100644
--- a/content/geometry/formulas.cpp
+++ b/content/geometry/formulas.cpp
@@ -6,20 +6,17 @@ constexpr double PIU = acos(-1.0l); // PIL < PI < PIU
constexpr double PIL = PIU-2e-19l;
// Winkel zwischen Punkt und x-Achse in [-PI, PI].
-double angle(pt a) {return arg(a);}
+double angle(pt a) { return arg(a); }
// rotiert Punkt im Uhrzeigersinn um den Ursprung.
-pt rotate(pt a, double theta) {return a * polar(1.0, theta);}
+pt rotate(pt a, double theta) { return a * polar(1.0, theta); }
// Skalarprodukt.
-auto dot(pt a, pt b) {return real(conj(a) * b);}
-
-// abs()^2.(pre c++20)
-auto norm(pt a) {return dot(a, a);}
+auto dot(pt a, pt b) { return real(conj(a) * b); }
// Kreuzprodukt, 0, falls kollinear.
-auto cross(pt a, pt b) {return imag(conj(a) * b);}
-auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);}
+auto cross(pt a, pt b) { return imag(conj(a) * b); }
+auto cross(pt p, pt a, pt b) { return cross(a - p, b - p); }
// 1 => c links von a->b
// 0 => a, b und c kolliniear
diff --git a/content/geometry/formulas3d.cpp b/content/geometry/formulas3d.cpp
index 63de2ce..66a4644 100644
--- a/content/geometry/formulas3d.cpp
+++ b/content/geometry/formulas3d.cpp
@@ -2,20 +2,20 @@
auto operator|(pt3 a, pt3 b) {
return a.x * b.x + a.y*b.y + a.z*b.z;
}
-auto dot(pt3 a, pt3 b) {return a|b;}
+auto dot(pt3 a, pt3 b) { return a|b; }
// Kreuzprodukt
-pt3 operator*(pt3 a, pt3 b) {return {a.y*b.z - a.z*b.y,
- a.z*b.x - a.x*b.z,
- a.x*b.y - a.y*b.x};}
-pt3 cross(pt3 a, pt3 b) {return a*b;}
+pt3 operator*(pt3 a, pt3 b) { return {a.y*b.z - a.z*b.y,
+ a.z*b.x - a.x*b.z,
+ a.x*b.y - a.y*b.x}; }
+pt3 cross(pt3 a, pt3 b) { return a*b; }
// Länge von a
-double abs(pt3 a) {return sqrt(dot(a, a));}
-double abs(pt3 a, pt3 b) {return abs(b - a);}
+double abs(pt3 a) { return sqrt(dot(a, a)); }
+double abs(pt3 a, pt3 b) { return abs(b - a); }
// Mixedprodukt
-auto mixed(pt3 a, pt3 b, pt3 c) {return a*b|c;};
+auto mixed(pt3 a, pt3 b, pt3 c) { return a*b|c; }
// orientierung von p zu der Ebene durch a, b, c
// -1 => gegen den Uhrzeigersinn,
diff --git a/content/geometry/geometry.tex b/content/geometry/geometry.tex
index 92285c4..9290de4 100644
--- a/content/geometry/geometry.tex
+++ b/content/geometry/geometry.tex
@@ -7,7 +7,7 @@
\sourcecode{geometry/closestPair.cpp}
\end{algorithm}
-\begin{algorithm}{Konvexehülle}
+\begin{algorithm}{Konvexe Hülle}
\begin{methods}
\method{convexHull}{berechnet konvexe Hülle}{n\*\log(n)}
\end{methods}
@@ -18,6 +18,7 @@
\end{itemize}
\sourcecode{geometry/convexHull.cpp}
\end{algorithm}
+\columnbreak
\begin{algorithm}{Rotating calipers}
\begin{methods}
@@ -29,6 +30,7 @@
\subsection{Formeln~~--~\texttt{std::complex}}
\sourcecode{geometry/formulas.cpp}
+\columnbreak
\sourcecode{geometry/linesAndSegments.cpp}
\sourcecode{geometry/sortAround.cpp}
\input{geometry/triangle}
@@ -40,7 +42,7 @@
\sourcecode{geometry/formulas3d.cpp}
\optional{
- \subsection{3D-Kugeln}
+ \subsection{3D-Kugeln \opthint}
\sourcecode{geometry/spheres.cpp}
}
@@ -48,15 +50,22 @@
\sourcecode{geometry/hpi.cpp}
\end{algorithm}
+\begin{algorithm}[optional]{Intersecting Segments}
+ \begin{methods}
+ \method{intersect}{finds ids of intersecting segments}{n\*\log(n)}
+ \end{methods}
+ \sourcecode{geometry/segmentIntersection.cpp}
+\end{algorithm}
+
\begin{algorithm}[optional]{Delaunay Triangulierung}
\begin{methods}
\method{delaunay}{berechnet Triangulierung}{n\*\log(n)}
\end{methods}
- \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Traingulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig.
+ \textbf{WICHTIG:} Wenn alle Punkte kollinear sind gibt es keine Triangulierung! Wenn 4 Punkte auf einem Kreis liegen ist die Triangulierung nicht eindeutig.
\sourcecode{geometry/delaunay.cpp}
\end{algorithm}
\optional{
-\subsection{Geraden}
+\subsection{Geraden \opthint}
\sourcecode{geometry/lines.cpp}
}
diff --git a/content/geometry/hpi.cpp b/content/geometry/hpi.cpp
index 02c71e3..ec27254 100644
--- a/content/geometry/hpi.cpp
+++ b/content/geometry/hpi.cpp
@@ -1,6 +1,6 @@
constexpr ll INF = 0x1FFF'FFFF'FFFF'FFFF; //THIS CODE IS WIP
-bool left(pt p) {return real(p) < 0 ||
+bool left(pt p) {return real(p) < 0 ||
(real(p) == 0 && imag(p) < 0);}
struct hp {
pt from, to;
@@ -11,7 +11,7 @@ struct hp {
bool dummy() const {return from == to;}
pt dir() const {return dummy() ? to : to - from;}
bool operator<(const hp& o) const {
- if (left(dir()) != left(o.dir()))
+ if (left(dir()) != left(o.dir()))
return left(dir()) > left(o.dir());
return cross(dir(), o.dir()) > 0;
}
diff --git a/content/geometry/linesAndSegments.cpp b/content/geometry/linesAndSegments.cpp
index ddab554..985ee24 100644
--- a/content/geometry/linesAndSegments.cpp
+++ b/content/geometry/linesAndSegments.cpp
@@ -28,9 +28,7 @@ pt projectToLine(pt a, pt b, pt p) {
// sortiert alle Punkte pts auf einer Linie entsprechend dir
void sortLine(pt dir, vector<pt>& pts) { // (2d und 3d)
- sort(all(pts), [&](pt a, pt b){
- return dot(dir, a) < dot(dir, b);
- });
+ ranges::sort(pts, {}, [&](pt x) { return dot(dir, x); });
}
// Liegt p auf der Strecke a-b? (nutze < für inberhalb)
@@ -66,7 +64,7 @@ vector<pt> segmentIntersection2(pt a, pt b, pt c, pt d) {
double x = cross(b - a, d - c);
double y = cross(c - a, d - c);
double z = cross(b - a, a - c);
- if (x < 0) {x = -x; y = -y; z = -z;}
+ if (x < 0) { x = -x; y = -y; z = -z; }
if (y < -EPS || y-x > EPS || z < -EPS || z-x > EPS) return {};
if (x > EPS) return {a + y/x*(b - a)};
vector<pt> result;
diff --git a/content/geometry/polygon.cpp b/content/geometry/polygon.cpp
index 1332a4a..474ce88 100644
--- a/content/geometry/polygon.cpp
+++ b/content/geometry/polygon.cpp
@@ -2,7 +2,7 @@
// Punkte gegen den Uhrzeigersinn: positiv, sonst negativ.
double area(const vector<pt>& poly) { //poly[0] == poly.back()
ll res = 0;
- for (int i = 0; i + 1 < sz(poly); i++)
+ for (int i = 0; i + 1 < ssize(poly); i++)
res += cross(poly[i], poly[i + 1]);
return 0.5 * res;
}
@@ -13,7 +13,7 @@ double area(const vector<pt>& poly) { //poly[0] == poly.back()
// selbstschneidenden Polygonen (definitions Sache)
ll windingNumber(pt p, const vector<pt>& poly) {
ll res = 0;
- for (int i = 0; i + 1 < sz(poly); i++) {
+ for (int i = 0; i + 1 < ssize(poly); i++) {
pt a = poly[i], b = poly[i + 1];
if (real(a) > real(b)) swap(a, b);
if (real(a) <= real(p) && real(p) < real(b) &&
@@ -26,7 +26,7 @@ ll windingNumber(pt p, const vector<pt>& poly) {
// check if point is inside polygon (any polygon)
bool inside(pt p, const vector<pt>& poly) {
bool in = false;
- for (int i = 0; i + 1 < sz(poly); i++) {
+ for (int i = 0; i + 1 < ssize(poly); i++) {
pt a = poly[i], b = poly[i + 1];
if (pointOnSegment(a, b, p)) return false; // border counts?
if (real(a) > real(b)) swap(a, b);
@@ -40,7 +40,7 @@ bool inside(pt p, const vector<pt>& poly) {
// convex hull without duplicates, h[0] != h.back()
// apply comments if border counts as inside
bool insideConvex(pt p, const vector<pt>& hull) {
- int l = 0, r = sz(hull) - 1;
+ int l = 0, r = ssize(hull) - 1;
if (cross(hull[0], hull[r], p) >= 0) return false; // > 0
while (l + 1 < r) {
int m = (l + r) / 2;
@@ -51,11 +51,9 @@ bool insideConvex(pt p, const vector<pt>& hull) {
}
void rotateMin(vector<pt>& hull) {
- auto mi = min_element(all(hull), [](const pt& a, const pt& b){
- return real(a) == real(b) ? imag(a) < imag(b)
- : real(a) < real(b);
- });
- rotate(hull.begin(), mi, hull.end());
+ auto mi = ranges::min_element(hull, {},
+ [](pt a) { return pair{real(a), imag(a)}; });
+ ranges::rotate(hull, mi);
}
// convex hulls without duplicates, h[0] != h.back()
@@ -67,7 +65,7 @@ vector<pt> minkowski(vector<pt> ps, vector<pt> qs) {
ps.push_back(ps[1]);
qs.push_back(qs[1]);
vector<pt> res;
- for (ll i = 0, j = 0; i + 2 < sz(ps) || j + 2 < sz(qs);) {
+ for (ll i = 0, j = 0; i+2 < ssize(ps) || j+2 < ssize(qs);) {
res.push_back(ps[i] + qs[j]);
auto c = cross(ps[i + 1] - ps[i], qs[j + 1] - qs[j]);
if(c >= 0) i++;
@@ -83,22 +81,22 @@ double dist(const vector<pt>& ps, vector<pt> qs) {
p.push_back(p[0]);
double res = INF;
bool intersect = true;
- for (ll i = 0; i + 1 < sz(p); i++) {
+ for (ll i = 0; i + 1 < ssize(p); i++) {
intersect &= cross(p[i], p[i+1]) >= 0;
res = min(res, distToSegment(p[i], p[i+1], 0));
}
return intersect ? 0 : res;
}
-bool left(pt of, pt p) {return cross(p, of) < 0 ||
- (cross(p, of) == 0 && dot(p, of) > 0);}
+bool left(pt of, pt p) { return cross(p, of) < 0 ||
+ (cross(p, of) == 0 && dot(p, of) > 0); }
// convex hulls without duplicates, hull[0] == hull.back() and
// hull[0] must be a convex point (with angle < pi)
// returns index of corner where dot(dir, corner) is maximized
int extremal(const vector<pt>& hull, pt dir) {
dir *= pt(0, 1);
- int l = 0, r = sz(hull) - 1;
+ int l = 0, r = ssize(hull) - 1;
while (l + 1 < r) {
int m = (l + r) / 2;
pt dm = hull[m+1]-hull[m];
@@ -110,7 +108,7 @@ int extremal(const vector<pt>& hull, pt dir) {
if (cross(dir, dm) < 0) l = m;
else r = m;
}}
- return r % (sz(hull) - 1);
+ return r % (ssize(hull) - 1);
}
// convex hulls without duplicates, hull[0] == hull.back() and
@@ -126,7 +124,7 @@ vector<int> intersectLine(const vector<pt>& hull, pt a, pt b) {
if (cross(hull[endA], a, b) > 0 ||
cross(hull[endB], a, b) < 0) return {};
- int n = sz(hull) - 1;
+ int n = ssize(hull) - 1;
vector<int> res;
for (auto _ : {0, 1}) {
int l = endA, r = endB;
diff --git a/content/geometry/segmentIntersection.cpp b/content/geometry/segmentIntersection.cpp
index afc01b2..9fdbdb8 100644
--- a/content/geometry/segmentIntersection.cpp
+++ b/content/geometry/segmentIntersection.cpp
@@ -39,10 +39,10 @@ pair<int, int> intersect(vector<seg>& segs) {
events.push_back({s.a, s.id, 1});
events.push_back({s.b, s.id, -1});
}
- sort(all(events));
+ ranges::sort(events, less{});
set<seg> q;
- vector<set<seg>::iterator> where(sz(segs));
+ vector<set<seg>::iterator> where(ssize(segs));
for (auto e : events) {
int id = e.id;
if (e.type > 0) {
diff --git a/content/geometry/sortAround.cpp b/content/geometry/sortAround.cpp
index 98d17a8..7e9d1de 100644
--- a/content/geometry/sortAround.cpp
+++ b/content/geometry/sortAround.cpp
@@ -1,11 +1,11 @@
-bool left(pt p) {return real(p) < 0 ||
- (real(p) == 0 && imag(p) < 0);}
-
-// counter clockwise, starting with "11:59"
-void sortAround(pt p, vector<pt>& ps) {
- sort(all(ps), [&](const pt& a, const pt& b){
- if (left(a - p) != left(b - p))
- return left(a - p) > left(b - p);
- return cross(p, a, b) > 0;
- });
-}
+bool left(pt p) { return real(p) < 0 ||
+ (real(p) == 0 && imag(p) < 0); }
+
+// counter clockwise, starting with "11:59"
+void sortAround(pt p, vector<pt>& ps) {
+ ranges::sort(ps, [&](const pt& a, const pt& b){
+ if (left(a - p) != left(b - p))
+ return left(a - p) > left(b - p);
+ return cross(p, a, b) > 0;
+ });
+}
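Editorial note, not part of the patch: a minimal usage sketch for sortAround, assuming pt is the complex-number point type used throughout the geometry section.

    pt c = {0, 0};
    vector<pt> ps = {{1, 1}, {-2, 0}, {0, -3}, {2, -1}};
    sortAround(c, ps);
    // ps is now ordered counter-clockwise around c, starting just before 12 o'clock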
diff --git a/content/geometry/triangle.cpp b/content/geometry/triangle.cpp
index 534bb10..eab17f4 100644
--- a/content/geometry/triangle.cpp
+++ b/content/geometry/triangle.cpp
@@ -1,5 +1,5 @@
// Mittelpunkt des Dreiecks abc.
-pt centroid(pt a, pt b, pt c) {return (a + b + c) / 3.0;}
+pt centroid(pt a, pt b, pt c) { return (a + b + c) / 3.0; }
// Flächeninhalt eines Dreiecks bei bekannten Eckpunkten.
double area(pt a, pt b, pt c) {
@@ -30,7 +30,7 @@ pt circumCenter(pt a, pt b, pt c) {
// -1 => p außerhalb Kreis durch a,b,c
// 0 => p auf Kreis durch a,b,c
// 1 => p im Kreis durch a,b,c
-int insideOutCenter(pt a, pt b, pt c, pt p) {// braucht lll
+int insideOutCenter(pt a, pt b, pt c, pt p) { // braucht lll
return ccw(a,b,c) * sgn(imag((c-b)*conj(p-c)*(a-p)*conj(b-a)));
}
diff --git a/content/graph/2sat.cpp b/content/graph/2sat.cpp
index 75e54e6..2b49fc6 100644
--- a/content/graph/2sat.cpp
+++ b/content/graph/2sat.cpp
@@ -4,19 +4,19 @@ struct sat2 {
sat2(int vars) : n(vars*2), adj(n) {}
- static int var(int i) {return i << 1;} // use this!
+ static int var(int i) { return i << 1; } // use this!
void addImpl(int a, int b) {
adj[a].push_back(b);
adj[1^b].push_back(1^a);
}
- void addEquiv(int a, int b) {addImpl(a, b); addImpl(b, a);}
- void addOr(int a, int b) {addImpl(1^a, b);}
- void addXor(int a, int b) {addOr(a, b); addOr(1^a, 1^b);}
- void addTrue(int a) {addImpl(1^a, a);}
- void addFalse(int a) {addTrue(1^a);}
- void addAnd(int a, int b) {addTrue(a); addTrue(b);}
- void addNand(int a, int b) {addOr(1^a, 1^b);}
+ void addEquiv(int a, int b) { addImpl(a, b); addImpl(b, a); }
+ void addOr(int a, int b) { addImpl(1^a, b);}
+ void addXor(int a, int b) { addOr(a, b); addOr(1^a, 1^b); }
+ void addTrue(int a) { addImpl(1^a, a);}
+ void addFalse(int a) { addTrue(1^a);}
+ void addAnd(int a, int b) { addTrue(a); addTrue(b); }
+ void addNand(int a, int b) { addOr(1^a, 1^b); }
bool solve() {
scc(); //scc code von oben
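Editorial note, not part of the patch: a small usage sketch for the sat2 interface; negating a literal via 1^l is inferred from addImpl above.

    sat2 s(2);                                // two boolean variables
    int x0 = sat2::var(0), x1 = sat2::var(1);
    s.addOr(x0, x1);                          // x0 or x1
    s.addOr(1 ^ x0, x1);                      // !x0 or x1
    bool ok = s.solve();                      // true; any solution has x1 = true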
diff --git a/content/graph/LCA_sparse.cpp b/content/graph/LCA_sparse.cpp
index 221b5ed..1da8876 100644
--- a/content/graph/LCA_sparse.cpp
+++ b/content/graph/LCA_sparse.cpp
@@ -5,12 +5,12 @@ struct LCA {
SparseTable st; //sparse table @\sourceref{datastructures/sparseTable.cpp}@
void init(vector<vector<int>>& adj, int root) {
- depth.assign(2 * sz(adj), 0);
- visited.assign(2 * sz(adj), -1);
- first.assign(sz(adj), 2 * sz(adj));
+ depth.assign(2 * ssize(adj), 0);
+ visited.assign(2 * ssize(adj), -1);
+ first.assign(ssize(adj), 2 * ssize(adj));
idx = 0;
dfs(adj, root);
- st.init(&depth);
+ st.init(depth);
}
void dfs(vector<vector<int>>& adj, int v, ll d=0) {
@@ -18,15 +18,15 @@ struct LCA {
first[v] = min(idx, first[v]), idx++;
for (int u : adj[v]) {
- if (first[u] == 2 * sz(adj)) {
+ if (first[u] == 2 * ssize(adj)) {
dfs(adj, u, d + 1);
visited[idx] = v, depth[idx] = d, idx++;
}}}
int getLCA(int u, int v) {
if (first[u] > first[v]) swap(u, v);
- return visited[st.queryIdempotent(first[u], first[v] + 1)];
+ return visited[st.query(first[u], first[v] + 1)];
}
- ll getDepth(int v) {return depth[first[v]];}
+ ll getDepth(int v) { return depth[first[v]]; }
};
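Editorial note, not part of the patch: a usage sketch for the struct above, with adj a plain 0-indexed adjacency list of a tree.

    vector<vector<int>> adj = {{1, 2}, {0}, {0, 3}, {2}};  // edges 0-1, 0-2, 2-3
    LCA lca;
    lca.init(adj, 0);
    int a = lca.getLCA(1, 3);                              // == 0
    ll len = lca.getDepth(1) + lca.getDepth(3)
           - 2 * lca.getDepth(a);                          // path length 1..3 == 3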
diff --git a/content/graph/TSP.cpp b/content/graph/TSP.cpp
index 6223858..4d2479c 100644
--- a/content/graph/TSP.cpp
+++ b/content/graph/TSP.cpp
@@ -1,7 +1,7 @@
vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten.
auto TSP() {
- int n = sz(dist), m = 1 << n;
+ int n = ssize(dist), m = 1 << n;
vector<vector<edge>> dp(n, vector<edge>(m, edge{INF, -1}));
for (int c = 0; c < n; c++)
@@ -21,7 +21,7 @@ auto TSP() {
vector<int> tour = {0};
int v = 0;
- while (tour.back() != 0 || sz(tour) == 1)
+ while (tour.back() != 0 || ssize(tour) == 1)
tour.push_back(dp[tour.back()]
[(v |= (1 << tour.back()))].to);
// Enthält Knoten 0 zweimal. An erster und letzter Position.
diff --git a/content/graph/articulationPoints.cpp b/content/graph/articulationPoints.cpp
index 25ff67e..60970e6 100644
--- a/content/graph/articulationPoints.cpp
+++ b/content/graph/articulationPoints.cpp
@@ -14,14 +14,14 @@ int dfs(int v, int from = -1) {
if (num[e.to] < me) st.push_back(e);
} else {
if (v == root) rootCount++;
- int si = sz(st);
+ int si = ssize(st);
int up = dfs(e.to, e.id);
top = min(top, up);
if (up >= me) isArt[v] = true;
if (up > me) bridges.push_back(e);
if (up <= me) st.push_back(e);
if (up == me) {
- bcc.emplace_back(si + all(st));
+ bcc.emplace_back(begin(st) + si, end(st));
st.resize(si);
}}}
return top;
@@ -29,12 +29,12 @@ int dfs(int v, int from = -1) {
void find() {
counter = 0;
- num.assign(sz(adj), 0);
- isArt.assign(sz(adj), false);
+ num.assign(ssize(adj), 0);
+ isArt.assign(ssize(adj), false);
bridges.clear();
st.clear();
bcc.clear();
- for (int v = 0; v < sz(adj); v++) {
+ for (int v = 0; v < ssize(adj); v++) {
if (!num[v]) {
root = v;
rootCount = 0;
diff --git a/content/graph/binary_lifting.cpp b/content/graph/binary_lifting.cpp
new file mode 100644
index 0000000..f88b1a9
--- /dev/null
+++ b/content/graph/binary_lifting.cpp
@@ -0,0 +1,28 @@
+struct Lift {
+ vector<int> dep, par, jmp;
+
+ Lift(vector<vector<int>> &adj, int root):
+ dep(adj.size()), par(adj.size()), jmp(adj.size(), root) {
+ auto dfs = [&](auto &self, int u, int p, int d) -> void {
+ dep[u] = d, par[u] = p;
+ jmp[u] = dep[p] + dep[jmp[jmp[p]]] == 2*dep[jmp[p]]
+ ? jmp[jmp[p]] : p;
+ for (int v: adj[u]) if (v != p) self(self, v, u, d+1);
+ };
+ dfs(dfs, root, root, 0);
+ }
+
+ int depth(int v) { return dep[v]; }
+ int lift(int v, int d) {
+ while (dep[v] > d) v = dep[jmp[v]] < d ? par[v] : jmp[v];
+ return v;
+ }
+ int lca(int u, int v) {
+ v = lift(v, dep[u]), u = lift(u, dep[v]);
+ while (u != v) {
+ auto &a = jmp[u] == jmp[v] ? par : jmp;
+ u = a[u], v = a[v];
+ }
+ return u;
+ }
+};
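Editorial note, not part of the patch: a usage sketch for the new Lift struct.

    vector<vector<int>> adj = {{1, 2}, {0, 3, 4}, {0}, {1}, {1}}; // edges 0-1, 0-2, 1-3, 1-4
    Lift l(adj, 0);        // root the tree at 0
    int d = l.depth(3);    // == 2
    int a = l.lift(3, 1);  // == 1, ancestor of 3 at depth 1
    int c = l.lca(3, 4);   // == 1
    int r = l.lca(2, 4);   // == 0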
diff --git a/content/graph/bitonicTSP.cpp b/content/graph/bitonicTSP.cpp
index eee5082..eeff156 100644
--- a/content/graph/bitonicTSP.cpp
+++ b/content/graph/bitonicTSP.cpp
@@ -1,10 +1,10 @@
vector<vector<double>> dist; // Initialisiere mit Entfernungen zwischen Punkten.
auto bitonicTSP() {
- vector<double> dp(sz(dist), HUGE_VAL);
- vector<int> pre(sz(dist)); // nur für Tour
+ vector<double> dp(ssize(dist), HUGE_VAL);
+ vector<int> pre(ssize(dist)); // nur für Tour
dp[0] = 0; dp[1] = 2 * dist[0][1]; pre[1] = 0;
- for (unsigned int i = 2; i < sz(dist); i++) {
+ for (unsigned int i = 2; i < ssize(dist); i++) {
double link = 0;
for (int j = i - 2; j >= 0; j--) {
link += dist[j + 1][j + 2];
@@ -13,9 +13,9 @@ auto bitonicTSP() {
dp[i] = opt;
pre[i] = j;
}}}
- // return dp.back(); // Länger der Tour
+ // return dp.back(); // Länge der Tour
- int j, n = sz(dist) - 1;
+ int j, n = ssize(dist) - 1;
vector<int> ut, lt = {n, n - 1};
do {
j = pre[n];
@@ -25,7 +25,7 @@ auto bitonicTSP() {
}
} while(n = j + 1, j > 0);
(lt.back() == 1 ? lt : ut).push_back(0);
- reverse(all(lt));
- lt.insert(lt.end(), all(ut));
+ ranges::reverse(lt);
+ lt.insert(end(lt), begin(ut), end(ut));
return lt; // Enthält Knoten 0 zweimal. An erster und letzter Position.
}
diff --git a/content/graph/bitonicTSPsimple.cpp b/content/graph/bitonicTSPsimple.cpp
index cacfb9c..b6d72d8 100644
--- a/content/graph/bitonicTSPsimple.cpp
+++ b/content/graph/bitonicTSPsimple.cpp
@@ -3,7 +3,7 @@ vector<vector<double>> dp;
double get(int p1, int p2) {
int v = max(p1, p2) + 1;
- if (v == sz(dist)) return dist[p1][v - 1] + dist[p2][v - 1];
+ if (v == ssize(dist)) return dist[p1][v - 1] + dist[p2][v - 1];
if (dp[p1][p2] >= 0.0) return dp[p1][p2];
double tryLR = dist[p1][v] + get(v, p2);
double tryRL = dist[p2][v] + get(p1, v);
@@ -11,17 +11,19 @@ double get(int p1, int p2) {
}
auto bitonicTSP() {
- dp = vector<vector<double>>(sz(dist),
- vector<double>(sz(dist), -1));
+ dp = vector<vector<double>>(ssize(dist),
+ vector<double>(ssize(dist), -1));
get(0, 0);
- // return dp[0][0]; // Länger der Tour
+ // return dp[0][0]; // Länge der Tour
vector<int> lr = {0}, rl = {0};
- for (int p1 = 0, p2 = 0, v; (v = max(p1, p2)+1) < sz(dist);) {
+ for (int p1 = 0, p2 = 0, v;
+ (v = max(p1, p2)+1) < ssize(dist);) {
if (dp[p1][p2] == dist[p1][v] + dp[v][p2]) {
lr.push_back(v); p1 = v;
} else {
rl.push_back(v); p2 = v;
}}
lr.insert(lr.end(), rl.rbegin(), rl.rend());
- return lr; // Enthält Knoten 0 zweimal. An erster und letzter Position.
+ // Enthält Knoten 0 zweimal. An erster und letzter Position.
+ return lr;
}
diff --git a/content/graph/blossom.cpp b/content/graph/blossom.cpp
index 7bd494a..3c9bd31 100644
--- a/content/graph/blossom.cpp
+++ b/content/graph/blossom.cpp
@@ -32,7 +32,7 @@ struct GM {
auto h = label[r] = label[s] = {~x, y};
int join;
while (true) {
- if (s != sz(adj)) swap(r, s);
+ if (s != ssize(adj)) swap(r, s);
r = findFirst(label[pairs[r]].first);
if (label[r] == h) {
join = r;
@@ -48,13 +48,13 @@ struct GM {
}}}
bool augment(int v) {
- label[v] = {sz(adj), -1};
- first[v] = sz(adj);
+ label[v] = {ssize(adj), -1};
+ first[v] = ssize(adj);
head = tail = 0;
for (que[tail++] = v; head < tail;) {
int x = que[head++];
for (int y : adj[x]) {
- if (pairs[y] == sz(adj) && y != v) {
+ if (pairs[y] == ssize(adj) && y != v) {
pairs[y] = x;
rematch(x, y);
return true;
@@ -70,12 +70,12 @@ struct GM {
int match() {
int matching = head = tail = 0;
- for (int v = 0; v < sz(adj); v++) {
- if (pairs[v] < sz(adj) || !augment(v)) continue;
+ for (int v = 0; v < ssize(adj); v++) {
+ if (pairs[v] < ssize(adj) || !augment(v)) continue;
matching++;
for (int i = 0; i < tail; i++)
label[que[i]] = label[pairs[que[i]]] = {-1, -1};
- label[sz(adj)] = {-1, -1};
+ label[ssize(adj)] = {-1, -1};
}
return matching;
}
diff --git a/content/graph/bronKerbosch.cpp b/content/graph/bronKerbosch.cpp
index 0cfcc5f..cf07c88 100644
--- a/content/graph/bronKerbosch.cpp
+++ b/content/graph/bronKerbosch.cpp
@@ -11,7 +11,7 @@ void bronKerboschRec(bits R, bits P, bits X) {
} else {
int q = min(P._Find_first(), X._Find_first());
bits cands = P & ~adj[q];
- for (int i = 0; i < sz(adj); i++) if (cands[i]) {
+ for (int i = 0; i < ssize(adj); i++) if (cands[i]) {
R[i] = 1;
bronKerboschRec(R, P & adj[i], X & adj[i]);
R[i] = P[i] = 0;
@@ -20,5 +20,5 @@ void bronKerboschRec(bits R, bits P, bits X) {
void bronKerbosch() {
cliques.clear();
- bronKerboschRec({}, {(1ull << sz(adj)) - 1}, {});
+ bronKerboschRec({}, {(1ull << ssize(adj)) - 1}, {});
}
diff --git a/content/graph/centroid.cpp b/content/graph/centroid.cpp
index 820945b..3cd5519 100644
--- a/content/graph/centroid.cpp
+++ b/content/graph/centroid.cpp
@@ -15,7 +15,7 @@ pair<int, int> dfs_cent(int v, int from, int n) {
}
pair<int, int> find_centroid(int root = 0) {
- s.resize(sz(adj));
+ s.resize(ssize(adj));
dfs_sz(root);
return dfs_cent(root, -1, s[root]);
}
diff --git a/content/graph/cycleCounting.cpp b/content/graph/cycleCounting.cpp
index 6a299ee..deac71e 100644
--- a/content/graph/cycleCounting.cpp
+++ b/content/graph/cycleCounting.cpp
@@ -9,8 +9,8 @@ struct cycles {
cycles(int n) : adj(n), seen(n), paths(n) {}
void addEdge(int u, int v) {
- adj[u].push_back({v, sz(edges)});
- adj[v].push_back({u, sz(edges)});
+ adj[u].push_back({v, ssize(edges)});
+ adj[v].push_back({u, ssize(edges)});
edges.push_back({u, v});
}
@@ -36,10 +36,10 @@ struct cycles {
cur[id].flip();
}}}
- bool isCycle(cycle cur) {//cycle must be constrcuted from base
+ bool isCycle(cycle cur) {// cycle must be constructed from base
if (cur.none()) return false;
- init(sz(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@
- for (int i = 0; i < sz(edges); i++) {
+ init(ssize(adj)); // union find @\sourceref{datastructures/unionFind.cpp}@
+ for (int i = 0; i < ssize(edges); i++) {
if (cur[i]) {
cur[i] = false;
if (findSet(edges[i].first) ==
@@ -50,12 +50,12 @@ struct cycles {
}
int count() {
- for (int i = 0; i < sz(adj); i++) findBase(i);
- assert(sz(base) < 30);
+ for (int i = 0; i < ssize(adj); i++) findBase(i);
+ assert(ssize(base) < 30);
int res = 0;
- for (int i = 1; i < (1 << sz(base)); i++) {
+ for (int i = 1; i < (1 << ssize(base)); i++) {
cycle cur;
- for (int j = 0; j < sz(base); j++)
+ for (int j = 0; j < ssize(base); j++)
if (((i >> j) & 1) != 0) cur ^= base[j];
if (isCycle(cur)) res++;
}
diff --git a/content/graph/dijkstra.cpp b/content/graph/dijkstra.cpp
index 61c636d..ab4bef9 100644
--- a/content/graph/dijkstra.cpp
+++ b/content/graph/dijkstra.cpp
@@ -1,21 +1,18 @@
-using path = pair<ll, int>; //dist, destination
+using Dist = ll;
-auto dijkstra(const vector<vector<path>>& adj, int start) {
- priority_queue<path, vector<path>, greater<path>> pq;
- vector<ll> dist(sz(adj), INF);
- vector<int> prev(sz(adj), -1);
- dist[start] = 0; pq.emplace(0, start);
+auto dijkstra(vector<vector<pair<int, Dist>>> &adj, int start) {
+ priority_queue<pair<Dist, int>> pq;
+ vector<Dist> dist(ssize(adj), INF);
+ dist[start] = 0, pq.emplace(0, start);
- while (!pq.empty()) {
- auto [dv, v] = pq.top(); pq.pop();
- if (dv > dist[v]) continue; // WICHTIG!
+ while (!empty(pq)) {
+ auto [du, u] = pq.top();
+ du = -du, pq.pop();
+ if (du > dist[u]) continue; // WICHTIG!
- for (auto [du, u] : adj[v]) {
- ll newDist = dv + du;
- if (newDist < dist[u]) {
- dist[u] = newDist;
- prev[u] = v;
- pq.emplace(dist[u], u);
- }}}
- return dist; //return prev;
+ for (auto [v, d]: adj[u]) {
+ Dist dv = du + d;
+ if (dv < dist[v]) dist[v] = dv, pq.emplace(-dv, v);
+ }}
+ return dist;
}
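Editorial note, not part of the patch: a usage sketch for the rewritten interface. The default max-heap is reused by pushing negated distances, which avoids spelling out greater<>; adj[u] holds {neighbour, weight} pairs.

    vector<vector<pair<int, Dist>>> adj(4);
    auto add = [&](int u, int v, Dist w) {
      adj[u].push_back({v, w});
      adj[v].push_back({u, w});   // drop this line for directed graphs
    };
    add(0, 1, 2); add(1, 2, 3); add(0, 3, 10); add(2, 3, 1);
    auto dist = dijkstra(adj, 0); // dist == {0, 2, 5, 6}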
diff --git a/content/graph/dinic.cpp b/content/graph/dinic.cpp
index 2e58a2d..c8c34a8 100644
--- a/content/graph/dinic.cpp
+++ b/content/graph/dinic.cpp
@@ -8,12 +8,12 @@ int s, t;
vector<int> pt, dist;
void addEdge(int u, int v, ll c) {
- adj[u].push_back({v, (int)sz(adj[v]), 0, c});
- adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0});
+ adj[u].push_back({v, (int)ssize(adj[v]), 0, c});
+ adj[v].push_back({u, (int)ssize(adj[u]) - 1, 0, 0});
}
bool bfs() {
- dist.assign(sz(adj), -1);
+ dist.assign(ssize(adj), -1);
dist[s] = 0;
queue<int> q({s});
while (!q.empty() && dist[t] < 0) {
@@ -28,7 +28,7 @@ bool bfs() {
ll dfs(int v, ll flow = INF) {
if (v == t || flow == 0) return flow;
- for (; pt[v] < sz(adj[v]); pt[v]++) {
+ for (; pt[v] < ssize(adj[v]); pt[v]++) {
Edge& e = adj[v][pt[v]];
if (dist[e.to] != dist[v] + 1) continue;
ll cur = dfs(e.to, min(e.c - e.f, flow));
@@ -44,7 +44,7 @@ ll maxFlow(int source, int target) {
s = source, t = target;
ll flow = 0;
while (bfs()) {
- pt.assign(sz(adj), 0);
+ pt.assign(ssize(adj), 0);
ll cur;
do {
cur = dfs(s);
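Editorial note, not part of the patch: a hypothetical usage sketch; the global adj is declared above the shown hunk and must be sized before adding edges.

    adj.assign(4, {});
    addEdge(0, 1, 3); addEdge(0, 2, 2);   // directed edges with capacities
    addEdge(1, 3, 2); addEdge(2, 3, 3);
    ll f = maxFlow(0, 3);                 // == 4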
diff --git a/content/graph/dinicScaling.cpp b/content/graph/dinicScaling.cpp
index 0974b78..0082c05 100644
--- a/content/graph/dinicScaling.cpp
+++ b/content/graph/dinicScaling.cpp
@@ -8,12 +8,12 @@ int s, t;
vector<int> pt, dist;
void addEdge(int u, int v, ll c) {
- adj[u].push_back({v, (int)sz(adj[v]), 0, c});
- adj[v].push_back({u, (int)sz(adj[u]) - 1, 0, 0});
+ adj[u].push_back({v, (int)ssize(adj[v]), 0, c});
+ adj[v].push_back({u, (int)ssize(adj[u]) - 1, 0, 0});
}
bool bfs(ll lim) {
- dist.assign(sz(adj), -1);
+ dist.assign(ssize(adj), -1);
dist[s] = 0;
queue<int> q({s});
while (!q.empty() && dist[t] < 0) {
@@ -28,7 +28,7 @@ bool bfs(ll lim) {
ll dfs(int v, ll flow) {
if (v == t || flow == 0) return flow;
- for (; pt[v] < sz(adj[v]); pt[v]++) {
+ for (; pt[v] < ssize(adj[v]); pt[v]++) {
Edge& e = adj[v][pt[v]];
if (dist[e.to] != dist[v] + 1) continue;
ll cur = dfs(e.to, min(e.c - e.f, flow));
@@ -45,7 +45,7 @@ ll maxFlow(int source, int target) {
ll flow = 0;
for (ll lim = (1LL << 62); lim >= 1; lim /= 2) {
while (bfs(lim)) {
- pt.assign(sz(adj), 0);
+ pt.assign(ssize(adj), 0);
ll cur;
do {
cur = dfs(s, lim);
diff --git a/content/graph/euler.cpp b/content/graph/euler.cpp
index e81cebe..d45dac0 100644
--- a/content/graph/euler.cpp
+++ b/content/graph/euler.cpp
@@ -2,8 +2,8 @@ vector<vector<pair<int, int>>> adj; // gets destroyed!
vector<int> cycle;
void addEdge(int u, int v) {
- adj[u].emplace_back(v, sz(adj[v]));
- adj[v].emplace_back(u, sz(adj[u]) - 1); // remove for directed
+ adj[u].emplace_back(v, ssize(adj[v]));
+ adj[v].emplace_back(u, ssize(adj[u]) - 1); // remove for directed
}
void euler(int v) {
diff --git a/content/graph/floydWarshall.cpp b/content/graph/floydWarshall.cpp
index df096c2..1a1138d 100644
--- a/content/graph/floydWarshall.cpp
+++ b/content/graph/floydWarshall.cpp
@@ -2,16 +2,16 @@ vector<vector<ll>> dist; // Entfernung zwischen je zwei Punkten.
vector<vector<int>> next;
void floydWarshall() {
- next.assign(sz(dist), vector<int>(sz(dist), -1));
- for (int i = 0; i < sz(dist); i++) {
- for (int j = 0; j < sz(dist); j++) {
+ next.assign(ssize(dist), vector<int>(ssize(dist), -1));
+ for (int i = 0; i < ssize(dist); i++) {
+ for (int j = 0; j < ssize(dist); j++) {
if (dist[i][j] < INF) {
next[i][j] = j;
}}}
- for (int k = 0; k < sz(dist); k++) {
- for (int i = 0; i < sz(dist); i++) {
- for (int j = 0; j < sz(dist); j++) {
+ for (int k = 0; k < ssize(dist); k++) {
+ for (int i = 0; i < ssize(dist); i++) {
+ for (int j = 0; j < ssize(dist); j++) {
// only needed if dist can be negative
if (dist[i][k] == INF || dist[k][j] == INF) continue;
if (dist[i][j] > dist[i][k] + dist[k][j]) {
diff --git a/content/graph/graph.tex b/content/graph/graph.tex
index 213c597..6e8e20b 100644
--- a/content/graph/graph.tex
+++ b/content/graph/graph.tex
@@ -1,12 +1,5 @@
\section{Graphen}
-\begin{algorithm}{Kruskal}
- \begin{methods}[ll]
- berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\
- \end{methods}
- \sourcecode{graph/kruskal.cpp}
-\end{algorithm}
-
\begin{algorithm}{Minimale Spannbäume}
\paragraph{Schnitteigenschaft}
Für jeden Schnitt $C$ im Graphen gilt:
@@ -16,6 +9,12 @@
\paragraph{Kreiseigenschaft}
Für jeden Kreis $K$ im Graphen gilt:
Die schwerste Kante auf dem Kreis ist nicht Teil des minimalen Spannbaums.
+
+ \subsection{\textsc{Kruskal}}
+ \begin{methods}[ll]
+ berechnet den Minimalen Spannbaum & \runtime{\abs{E}\cdot\log(\abs{E})} \\
+ \end{methods}
+ \sourcecode{graph/kruskal.cpp}
\end{algorithm}
\begin{algorithm}{Heavy-Light Decomposition}
@@ -28,7 +27,7 @@
\sourcecode{graph/hld.cpp}
\end{algorithm}
-\begin{algorithm}{Lowest Common Ancestor}
+\begin{algorithm}[optional]{Lowest Common Ancestor}
\begin{methods}
\method{init}{baut DFS-Baum über $g$ auf}{\abs{V}\*\log(\abs{V})}
\method{getLCA}{findet LCA}{1}
@@ -37,6 +36,17 @@
\sourcecode{graph/LCA_sparse.cpp}
\end{algorithm}
+\begin{algorithm}{Binary Lifting}
+ % https://codeforces.com/blog/entry/74847
+ \begin{methods}
+ \method{Lift}{constructor}{\abs{V}}
+ \method{depth}{distance to root of vertex $v$}{1}
+ \method{lift}{vertex above $v$ at depth $d$}{\log(\abs{V})}
+ \method{lca}{lowest common ancestor of $u$ and $v$}{\log(\abs{V})}
+ \end{methods}
+ \sourcecode{graph/binary_lifting.cpp}
+\end{algorithm}
+
\begin{algorithm}{Centroids}
\begin{methods}
\method{find\_centroid}{findet alle Centroids des Baums (maximal 2)}{\abs{V}}
@@ -99,7 +109,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da
\sourcecode{graph/connect.cpp}
\end{algorithm}
-\begin{algorithm}{Erd\H{o}s-Gallai}
+\begin{algorithm}{\textsc{Erd\H{o}s-Gallai}}
Sei $d_1 \geq \cdots \geq d_{n}$. Es existiert genau dann ein Graph $G$ mit Degreesequence $d$ falls $\sum\limits_{i=1}^{n} d_i$ gerade ist und für $1\leq k \leq n$: $\sum\limits_{i=1}^{k} d_i \leq k\cdot(k-1)+\sum\limits_{i=k+1}^{n} \min(d_i, k)$
\begin{methods}
\method{havelHakimi}{findet Graph}{(\abs{V}+\abs{E})\cdot\log(\abs{V})}
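Editorial note, not part of the patch, as a quick check of the Erd\H{o}s-Gallai condition quoted above: for $d=(3,3,2,2,2)$, $\sum_i d_i = 12$ is even, and for $k=2$ the inequality reads $3+3 \leq 2\cdot 1 + \min(2,2)+\min(2,2)+\min(2,2) = 8$; the remaining $k$ work out the same way, so a graph exists (a $5$-cycle plus one chord).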
@@ -170,7 +180,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da
\sourcecode{graph/virtualTree.cpp}
\end{algorithm}
-\begin{algorithm}{Maximum Cardinatlity Bipartite Matching}
+\begin{algorithm}{Maximum Cardinality Bipartite Matching}
\label{kuhn}
\begin{methods}
\method{kuhn}{berechnet Matching}{\abs{V}\*\min(ans^2, \abs{E})}
@@ -178,7 +188,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da
\begin{itemize}
\item die ersten [0..l) Knoten in \code{adj} sind die linke Seite des Graphen
\end{itemize}
- \sourcecode{graph/maxCarBiMatch.cpp}
+ \sourcecode{graph/kuhn.cpp}
\begin{methods}
\method{hopcroft\_karp}{berechnet Matching}{\sqrt{\abs{V}}\*\abs{E}}
\end{methods}
@@ -197,7 +207,7 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da
\subsection{Max-Flow}
\optional{
-\subsubsection{Push Relabel}
+\subsubsection{Push Relabel \opthint}
\begin{methods}
\method{maxFlow}{gut bei sehr dicht besetzten Graphen.}{\abs{V}^2\*\sqrt{\abs{E}}}
\method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1}
@@ -205,24 +215,23 @@ Sei $a_{ij}$ die Adjazenzmatrix von $G$ \textcolor{gray}{(mit $a_{ii} = 1$)}, da
\sourcecode{graph/pushRelabel.cpp}
}
+\subsubsection{\textsc{Dinic}'s Algorithm mit Capacity Scaling}
+\begin{methods}
+ \method{maxFlow}{doppelt so schnell wie \textsc{Ford-Fulkerson}}{\abs{V}^2\cdot\abs{E}}
+ \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1}
+\end{methods}
+\sourcecode{graph/dinicScaling.cpp}
+
\begin{algorithm}{Min-Cost-Max-Flow}
\begin{methods}
\method{mincostflow}{berechnet Fluss}{\abs{V}^2\cdot\abs{E}^2}
\end{methods}
\sourcecode{graph/minCostMaxFlow.cpp}
\end{algorithm}
-\vfill\null
\columnbreak
-\subsubsection{Dinic's Algorithm mit Capacity Scaling}
-\begin{methods}
- \method{maxFlow}{doppelt so schnell wie Ford Fulkerson}{\abs{V}^2\cdot\abs{E}}
- \method{addEdge}{fügt eine \textbf{gerichtete} Kante ein}{1}
-\end{methods}
-\sourcecode{graph/dinicScaling.cpp}
-
\optional{
-\subsubsection{Anwendungen}
+\subsubsection{Anwendungen \opthint}
\begin{itemize}
\item \textbf{Maximum Edge Disjoint Paths}\newline
Finde die maximale Anzahl Pfade von $s$ nach $t$, die keine Kante teilen.
diff --git a/content/graph/havelHakimi.cpp b/content/graph/havelHakimi.cpp
index ac4d67d..9f4c081 100644
--- a/content/graph/havelHakimi.cpp
+++ b/content/graph/havelHakimi.cpp
@@ -1,12 +1,12 @@
vector<vector<int>> havelHakimi(const vector<int>& deg) {
priority_queue<pair<int, int>> pq;
- for (int i = 0; i < sz(deg); i++) {
+ for (int i = 0; i < ssize(deg); i++) {
if (deg[i] > 0) pq.push({deg[i], i});
}
- vector<vector<int>> adj(sz(deg));
+ vector<vector<int>> adj(ssize(deg));
while (!pq.empty()) {
auto [degV, v] = pq.top(); pq.pop();
- if (sz(pq) < degV) return {}; //impossible
+ if (ssize(pq) < degV) return {}; //impossible
vector<pair<int, int>> todo(degV);
for (auto& e : todo) e = pq.top(), pq.pop();
for (auto [degU, u] : todo) {
diff --git a/content/graph/hld.cpp b/content/graph/hld.cpp
index 65d3f5c..e365b13 100644
--- a/content/graph/hld.cpp
+++ b/content/graph/hld.cpp
@@ -21,7 +21,7 @@ void dfs_hld(int v = 0, int from = -1) {
}
void init(int root = 0) {
- int n = sz(adj);
+ int n = ssize(adj);
sz.assign(n, 1), nxt.assign(n, root), par.assign(n, -1);
in.resize(n), out.resize(n);
counter = 0;
diff --git a/content/graph/hopcroftKarp.cpp b/content/graph/hopcroftKarp.cpp
index c1f5d1c..d07bd3a 100644
--- a/content/graph/hopcroftKarp.cpp
+++ b/content/graph/hopcroftKarp.cpp
@@ -5,14 +5,14 @@ vector<int> pairs, dist, ptr;
bool bfs(int l) {
queue<int> q;
for(int v = 0; v < l; v++) {
- if (pairs[v] < 0) {dist[v] = 0; q.push(v);}
+ if (pairs[v] < 0) { dist[v] = 0; q.push(v); }
else dist[v] = -1;
}
bool exist = false;
while(!q.empty()) {
int v = q.front(); q.pop();
for (int u : adj[v]) {
- if (pairs[u] < 0) {exist = true; continue;}
+ if (pairs[u] < 0) { exist = true; continue; }
if (dist[pairs[u]] < 0) {
dist[pairs[u]] = dist[v] + 1;
q.push(pairs[u]);
@@ -21,7 +21,7 @@ bool bfs(int l) {
}
bool dfs(int v) {
- for (; ptr[v] < sz(adj[v]); ptr[v]++) {
+ for (; ptr[v] < ssize(adj[v]); ptr[v]++) {
int u = adj[v][ptr[v]];
if (pairs[u] < 0 ||
(dist[pairs[u]] > dist[v] && dfs(pairs[u]))) {
@@ -33,7 +33,7 @@ bool dfs(int v) {
int hopcroft_karp(int l) { // l = #Knoten links
int ans = 0;
- pairs.assign(sz(adj), -1);
+ pairs.assign(ssize(adj), -1);
dist.resize(l);
// Greedy Matching, optionale Beschleunigung.
for (int v = 0; v < l; v++) for (int u : adj[v])
diff --git a/content/graph/kruskal.cpp b/content/graph/kruskal.cpp
index 987d30b..d42800d 100644
--- a/content/graph/kruskal.cpp
+++ b/content/graph/kruskal.cpp
@@ -1,4 +1,4 @@
-sort(all(edges));
+ranges::sort(edges, less{});
vector<Edge> mst;
ll cost = 0;
for (Edge& e : edges) {
diff --git a/content/graph/maxCarBiMatch.cpp b/content/graph/kuhn.cpp
index e928387..688c846 100644
--- a/content/graph/maxCarBiMatch.cpp
+++ b/content/graph/kuhn.cpp
@@ -12,7 +12,7 @@ bool dfs(int v) {
}
int kuhn(int l) { // l = #Knoten links.
- pairs.assign(sz(adj), -1);
+ pairs.assign(ssize(adj), -1);
int ans = 0;
// Greedy Matching. Optionale Beschleunigung.
for (int v = 0; v < l; v++) for (int u : adj[v])
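Editorial note, not part of the patch: a hypothetical usage sketch. As the surrounding hunks suggest, vertices [0..l) are the left side and the global adj (assumed vector<vector<int>>) stores edges from left to right only.

    int l = 2;
    adj.assign(4, {});        // 2 left + 2 right vertices
    adj[0] = {2, 3};
    adj[1] = {2};
    int matched = kuhn(l);    // == 2; pairs[v] holds the partner, -1 if unmatched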
diff --git a/content/graph/matching.cpp b/content/graph/matching.cpp
index dcaea8c..3619d7c 100644
--- a/content/graph/matching.cpp
+++ b/content/graph/matching.cpp
@@ -3,19 +3,19 @@ vector<vector<ll>> adj, mat;
int max_matching() {
int ans = 0;
- mat.assign(sz(adj), {});
+ mat.assign(ssize(adj), {});
for (int _ = 0; _ < I; _++) {
- for (int v = 0; v < sz(adj); v++) {
- mat[v].assign(sz(adj), 0);
+ for (int v = 0; v < ssize(adj); v++) {
+ mat[v].assign(ssize(adj), 0);
for (int u : adj[v]) {
if (u < v) {
mat[v][u] = rand() % (MOD - 1) + 1;
mat[u][v] = MOD - mat[v][u];
}}}
- gauss(sz(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@
+ gauss(ssize(adj), MOD); //LGS @\sourceref{math/lgsFp.cpp}@
int rank = 0;
for (auto& row : mat) {
- if (*max_element(all(row)) != 0) rank++;
+ if (*ranges::max_element(row) != 0) rank++;
}
ans = max(ans, rank / 2);
}
diff --git a/content/graph/maxWeightBipartiteMatching.cpp b/content/graph/maxWeightBipartiteMatching.cpp
index a2b0a80..b6f6ddf 100644
--- a/content/graph/maxWeightBipartiteMatching.cpp
+++ b/content/graph/maxWeightBipartiteMatching.cpp
@@ -45,6 +45,6 @@ double match(int l, int r) {
yx[y] = aug[y];
swap(y, xy[aug[y]]);
}}
- return accumulate(all(lx), 0.0) +
- accumulate(all(ly), 0.0); // Wert des Matchings
+ return accumulate(begin(lx), end(lx), 0.0) +
+ accumulate(begin(ly), end(ly), 0.0); // Wert des Matchings
}
diff --git a/content/graph/minCostMaxFlow.cpp b/content/graph/minCostMaxFlow.cpp
index 14a222c..fde95f3 100644
--- a/content/graph/minCostMaxFlow.cpp
+++ b/content/graph/minCostMaxFlow.cpp
@@ -15,16 +15,16 @@ struct MinCostFlow {
adj(n), s(source), t(target) {};
void addEdge(int u, int v, ll c, ll cost) {
- adj[u].push_back(sz(edges));
+ adj[u].push_back(ssize(edges));
edges.push_back({v, c, cost});
- adj[v].push_back(sz(edges));
+ adj[v].push_back(ssize(edges));
edges.push_back({u, 0, -cost});
}
bool SPFA() {
- pref.assign(sz(adj), -1);
- dist.assign(sz(adj), INF);
- vector<bool> inqueue(sz(adj));
+ pref.assign(ssize(adj), -1);
+ dist.assign(ssize(adj), INF);
+ vector<bool> inqueue(ssize(adj));
queue<int> queue;
dist[s] = 0;
queue.push(s);
@@ -59,7 +59,7 @@ struct MinCostFlow {
}}
void mincostflow() {
- con.assign(sz(adj), 0);
+ con.assign(ssize(adj), 0);
maxflow = mincost = 0;
while (SPFA()) extend();
}
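Editorial note, not part of the patch: a hypothetical usage sketch. The constructor argument order (node count, source, target) is only inferred from the initializer list visible in the hunk header.

    MinCostFlow mcf(4, 0, 3);
    mcf.addEdge(0, 1, 2, 1);  // u, v, capacity, cost
    mcf.addEdge(0, 2, 1, 2);
    mcf.addEdge(1, 3, 2, 1);
    mcf.addEdge(2, 3, 1, 1);
    mcf.mincostflow();
    // mcf.maxflow == 3, mcf.mincost == 7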
diff --git a/content/graph/pushRelabel.cpp b/content/graph/pushRelabel.cpp
index ec36026..c569df2 100644
--- a/content/graph/pushRelabel.cpp
+++ b/content/graph/pushRelabel.cpp
@@ -9,8 +9,8 @@ vector<ll> ec;
vector<int> cur, H;
void addEdge(int u, int v, ll c) {
- adj[u].push_back({v, (int)sz(adj[v]), 0, c});
- adj[v].push_back({u, (int)sz(adj[u])-1, 0, 0});
+ adj[u].push_back({v, (int)ssize(adj[v]), 0, c});
+ adj[v].push_back({u, (int)ssize(adj[u])-1, 0, 0});
}
void addFlow(Edge& e, ll f) {
@@ -23,7 +23,7 @@ void addFlow(Edge& e, ll f) {
}
ll maxFlow(int s, int t) {
- int n = sz(adj);
+ int n = ssize(adj);
hs.assign(2*n, {});
ec.assign(n, 0);
cur.assign(n, 0);
@@ -38,9 +38,9 @@ ll maxFlow(int s, int t) {
int v = hs[hi].back();
hs[hi].pop_back();
while (ec[v] > 0) {
- if (cur[v] == sz(adj[v])) {
+ if (cur[v] == ssize(adj[v])) {
H[v] = 2*n;
- for (int i = 0; i < sz(adj[v]); i++) {
+ for (int i = 0; i < ssize(adj[v]); i++) {
Edge& e = adj[v][i];
if (e.c - e.f > 0 &&
H[v] > H[e.to] + 1) {
diff --git a/content/graph/reroot.cpp b/content/graph/reroot.cpp
index 379c839..5a9c9d1 100644
--- a/content/graph/reroot.cpp
+++ b/content/graph/reroot.cpp
@@ -26,11 +26,11 @@ struct Reroot {
pref.push_back(takeChild(v, u, w, dp[u]));
}
auto suf = pref;
- partial_sum(all(pref), pref.begin(), comb);
+ partial_sum(begin(pref), end(pref), begin(pref), comb);
exclusive_scan(suf.rbegin(), suf.rend(),
suf.rbegin(), E, comb);
- for (int i = 0; i < sz(adj[v]); i++) {
+ for (int i = 0; i < ssize(adj[v]); i++) {
auto [u, w] = adj[v][i];
if (u == from) continue;
dp[v] = fin(v, comb(pref[i], suf[i + 1]));
@@ -40,7 +40,7 @@ struct Reroot {
}
auto solve() {
- dp.assign(sz(adj), E);
+ dp.assign(ssize(adj), E);
dfs0(0);
dfs1(0);
return dp;
diff --git a/content/graph/scc.cpp b/content/graph/scc.cpp
index 32f1099..6887712 100644
--- a/content/graph/scc.cpp
+++ b/content/graph/scc.cpp
@@ -23,11 +23,11 @@ void visit(int v) {
}}}
void scc() {
- inStack.assign(sz(adj), false);
- low.assign(sz(adj), -1);
- idx.assign(sz(adj), -1);
+ inStack.assign(ssize(adj), false);
+ low.assign(ssize(adj), -1);
+ idx.assign(ssize(adj), -1);
counter = sccCounter = 0;
- for (int i = 0; i < sz(adj); i++) {
+ for (int i = 0; i < ssize(adj); i++) {
if (low[i] < 0) visit(i);
}}
diff --git a/content/graph/stoerWagner.cpp b/content/graph/stoerWagner.cpp
index 97e667a..a122488 100644
--- a/content/graph/stoerWagner.cpp
+++ b/content/graph/stoerWagner.cpp
@@ -7,7 +7,7 @@ vector<vector<Edge>> adj, tmp;
vector<bool> erased;
void merge(int u, int v) {
- tmp[u].insert(tmp[u].end(), all(tmp[v]));
+ tmp[u].insert(end(tmp[u]), begin(tmp[v]), end(tmp[v]));
tmp[v].clear();
erased[v] = true;
for (auto& vec : tmp) {
@@ -19,33 +19,33 @@ void merge(int u, int v) {
ll stoer_wagner() {
ll res = INF;
tmp = adj;
- erased.assign(sz(tmp), false);
- for (int i = 1; i < sz(tmp); i++) {
+ erased.assign(ssize(tmp), false);
+ for (int i = 1; i < ssize(tmp); i++) {
int s = 0;
while (erased[s]) s++;
priority_queue<pair<ll, int>> pq;
pq.push({0, s});
- vector<ll> con(sz(tmp));
+ vector<ll> con(ssize(tmp));
ll cur = 0;
vector<pair<ll, int>> state;
while (!pq.empty()) {
int c = pq.top().second;
pq.pop();
- if (con[c] < 0) continue; //already seen
+ if (con[c] < 0) continue; // already seen
con[c] = -1;
for (auto e : tmp[c]) {
- if (con[e.to] >= 0) {//add edge to cut
+ if (con[e.to] >= 0) { // add edge to cut
con[e.to] += e.cap;
pq.push({con[e.to], e.to});
cur += e.cap;
- } else if (e.to != c) {//remove edge from cut
+ } else if (e.to != c) { // remove edge from cut
cur -= e.cap;
}}
state.push_back({cur, c});
}
int t = state.back().second;
state.pop_back();
- if (state.empty()) return 0; //graph is not connected?!
+ if (state.empty()) return 0; // graph is not connected?!
merge(state.back().second, t);
res = min(res, state.back().first);
}
diff --git a/content/graph/treeIsomorphism.cpp b/content/graph/treeIsomorphism.cpp
index 355fefb..8c2ca21 100644
--- a/content/graph/treeIsomorphism.cpp
+++ b/content/graph/treeIsomorphism.cpp
@@ -7,9 +7,9 @@ int treeLabel(int v, int from = -1) {
if (u == from) continue;
children.push_back(treeLabel(u, v));
}
- sort(all(children));
+ ranges::sort(children);
if (known.find(children) == known.end()) {
- known[children] = sz(known);
+ known[children] = ssize(known);
}
return known[children];
}
diff --git a/content/graph/virtualTree.cpp b/content/graph/virtualTree.cpp
index 6233b27..81ba001 100644
--- a/content/graph/virtualTree.cpp
+++ b/content/graph/virtualTree.cpp
@@ -2,14 +2,14 @@
vector<int> in, out;
void virtualTree(vector<int> ind) { // indices of used nodes
- sort(all(ind), [&](int x, int y) {return in[x] < in[y];});
- for (int i = 1, n = sz(ind); i < n; i++) {
+ ranges::sort(ind, {}, [&](int x) { return in[x]; });
+ for (int i = 1, n = ssize(ind); i < n; i++) {
ind.push_back(lca(ind[i - 1], ind[i]));
}
- sort(all(ind), [&](int x, int y) {return in[x] < in[y];});
- ind.erase(unique(all(ind)), ind.end());
+ ranges::sort(ind, {}, [&](int x) { return in[x]; });
+ ind.erase(begin(ranges::unique(ind)), end(ind));
- int n = sz(ind);
+ int n = ssize(ind);
vector<vector<int>> tree(n);
vector<int> st = {0};
for (int i = 1; i < n; i++) {
diff --git a/content/latexHeaders/code.sty b/content/latexHeaders/code.sty
index 3ebdda3..8a600c5 100644
--- a/content/latexHeaders/code.sty
+++ b/content/latexHeaders/code.sty
@@ -1,3 +1,6 @@
+\usepackage{ocgx2}
+\usepackage{fontawesome}
+
% Colors, used for syntax highlighting.
% To print this document, set all colors to black!
\usepackage{xcolor}
@@ -101,6 +104,32 @@
% \addtocounter{lstnumber}{-1}%
%}
+\ifthenelse{\isundefined{\srclink}}{}{
+ \lst@AddToHook{Init}{%
+ \ifthenelse{\equal{\lst@name}{}}{}{%
+ \begin{minipage}[t][0pt]{\linewidth}%
+ \vspace{0pt}%
+ \hfill%
+ \begin{ocg}[printocg=never]{Source links}{srclinks}{1}%
+ \hfill\href{\srclink{\lst@name}}{\faExternalLink}%
+ \end{ocg}%
+ \end{minipage}%
+ }%
+ }
+}
+
+\lst@AddToHook{DeInit}{%
+ \ifthenelse{\equal{\lst@name}{}}{}{%
+ \begin{minipage}[b][0pt]{\linewidth}%
+ \vspace{0pt}%
+ \hfill%
+ \begin{ocg}[printocg=never]{Source file names}{srcfiles}{0}%
+ \hfill\textcolor{gray}{\lst@name}%
+ \end{ocg}%
+ \end{minipage}%
+ }%
+}
+
\newenvironment{btHighlight}[1][]
{\begingroup\tikzset{bt@Highlight@par/.style={#1}}\begin{lrbox}{\@tempboxa}}
{\end{lrbox}\bt@HL@box[bt@Highlight@par]{\@tempboxa}\endgroup}
diff --git a/content/latexHeaders/commands.sty b/content/latexHeaders/commands.sty
index edbba1b..73a7dca 100644
--- a/content/latexHeaders/commands.sty
+++ b/content/latexHeaders/commands.sty
@@ -7,6 +7,11 @@
\newcommand{\code}[1]{\lstinline[breaklines=true]{#1}}
\let\codeSafe\lstinline
+\ifoptional
+ \renewcommand{\columnbreak}{}
+ \newcommand\opthint{\textcolor{gray}{(optional)}}
+\fi
+
\usepackage{tikz}
\usetikzlibrary{angles,quotes}
@@ -17,7 +22,7 @@
\ifthenelse{\equal{#1}{optional}}{%
\optional{
\needspace{4\baselineskip}%
- \subsection{#2\textcolor{gray}{(optional)}}%
+ \subsection{#2 \opthint}%
#3%
}
}{%
diff --git a/content/latexmk.opt b/content/latexmk.opt
new file mode 100644
index 0000000..88d3463
--- /dev/null
+++ b/content/latexmk.opt
@@ -0,0 +1,2 @@
+$jobname = 'tcr-opt';
+$pre_tex_code .= '\def\OPTIONAL{}'
diff --git a/content/latexmkrc b/content/latexmkrc
new file mode 100644
index 0000000..b43f9a2
--- /dev/null
+++ b/content/latexmkrc
@@ -0,0 +1,13 @@
+@default_files = qw(tcr);
+$pdf_mode = 1;
+$aux_dir = ".";
+$out_dir = "..";
+{
+ my $commit = `git rev-parse HEAD`;
+ chomp $commit;
+ $pre_tex_code .=
+ '\newcommand{\srclink}[1]'
+ .'{https://git.gloria-mundi.eu/tcr/plain/content/#1?id='.$commit.'}';
+}
+&alt_tex_cmds;
+$jobname = 'tcr';
diff --git a/content/math/berlekampMassey.cpp b/content/math/berlekampMassey.cpp
index 29e084f..85a1031 100644
--- a/content/math/berlekampMassey.cpp
+++ b/content/math/berlekampMassey.cpp
@@ -1,6 +1,6 @@
constexpr ll mod = 1'000'000'007;
vector<ll> BerlekampMassey(const vector<ll>& s) {
- int n = sz(s), L = 0, m = 0;
+ int n = ssize(s), L = 0, m = 0;
vector<ll> C(n), B(n), T;
C[0] = B[0] = 1;
diff --git a/content/math/bigint.cpp b/content/math/bigint.cpp
index 1b3b953..a40f515 100644
--- a/content/math/bigint.cpp
+++ b/content/math/bigint.cpp
@@ -7,9 +7,9 @@ struct bigint {
bigint() : sign(1) {}
- bigint(ll v) {*this = v;}
+ bigint(ll v) { *this = v; }
- bigint(const string &s) {read(s);}
+ bigint(const string &s) { read(s); }
void operator=(ll v) {
sign = 1;
@@ -22,10 +22,11 @@ struct bigint {
bigint operator+(const bigint& v) const {
if (sign == v.sign) {
bigint res = v;
- for (ll i = 0, carry = 0; i < max(sz(a), sz(v.a)) || carry; ++i) {
- if (i == sz(res.a))
+ for (ll i = 0, carry = 0;
+ i < max(ssize(a), ssize(v.a)) || carry; ++i) {
+ if (i == ssize(res.a))
res.a.push_back(0);
- res.a[i] += carry + (i < sz(a) ? a[i] : 0);
+ res.a[i] += carry + (i < ssize(a) ? a[i] : 0);
carry = res.a[i] >= base;
if (carry)
res.a[i] -= base;
@@ -39,8 +40,8 @@ struct bigint {
if (sign == v.sign) {
if (abs() >= v.abs()) {
bigint res = *this;
- for (ll i = 0, carry = 0; i < sz(v.a) || carry; ++i) {
- res.a[i] -= carry + (i < sz(v.a) ? v.a[i] : 0);
+ for (ll i = 0, carry = 0; i < ssize(v.a) || carry; ++i) {
+ res.a[i] -= carry + (i < ssize(v.a) ? v.a[i] : 0);
carry = res.a[i] < 0;
if (carry) res.a[i] += base;
}
@@ -54,8 +55,8 @@ struct bigint {
void operator*=(ll v) {
if (v < 0) sign = -sign, v = -v;
- for (ll i = 0, carry = 0; i < sz(a) || carry; ++i) {
- if (i == sz(a)) a.push_back(0);
+ for (ll i = 0, carry = 0; i < ssize(a) || carry; ++i) {
+ if (i == ssize(a)) a.push_back(0);
ll cur = a[i] * v + carry;
carry = cur / base;
a[i] = cur % base;
@@ -74,12 +75,12 @@ struct bigint {
bigint a = a1.abs() * norm;
bigint b = b1.abs() * norm;
bigint q, r;
- q.a.resize(sz(a.a));
- for (ll i = sz(a.a) - 1; i >= 0; i--) {
+ q.a.resize(ssize(a.a));
+ for (ll i = ssize(a.a) - 1; i >= 0; i--) {
r *= base;
r += a.a[i];
- ll s1 = sz(r.a) <= sz(b.a) ? 0 : r.a[sz(b.a)];
- ll s2 = sz(r.a) <= sz(b.a) - 1 ? 0 : r.a[sz(b.a) - 1];
+ ll s1 = ssize(r.a) <= ssize(b.a) ? 0 : r.a[ssize(b.a)];
+ ll s2 = ssize(r.a) <= ssize(b.a) - 1 ? 0 : r.a[ssize(b.a) - 1];
ll d = (base * s1 + s2) / b.a.back();
r -= b * d;
while (r < 0) r += b, --d;
@@ -102,7 +103,7 @@ struct bigint {
void operator/=(ll v) {
if (v < 0) sign = -sign, v = -v;
- for (ll i = sz(a) - 1, rem = 0; i >= 0; --i) {
+ for (ll i = ssize(a) - 1, rem = 0; i >= 0; --i) {
ll cur = a[i] + rem * base;
a[i] = cur / v;
rem = cur % v;
@@ -119,7 +120,7 @@ struct bigint {
ll operator%(ll v) const {
if (v < 0) v = -v;
ll m = 0;
- for (ll i = sz(a) - 1; i >= 0; --i)
+ for (ll i = ssize(a) - 1; i >= 0; --i)
m = (a[i] + m * base) % v;
return m * sign;
}
@@ -139,9 +140,9 @@ struct bigint {
bool operator<(const bigint& v) const {
if (sign != v.sign) return sign < v.sign;
- if (sz(a) != sz(v.a))
- return sz(a) * sign < sz(v.a) * v.sign;
- for (ll i = sz(a) - 1; i >= 0; i--)
+ if (ssize(a) != ssize(v.a))
+ return ssize(a) * sign < ssize(v.a) * v.sign;
+ for (ll i = ssize(a) - 1; i >= 0; i--)
if (a[i] != v.a[i])
return a[i] * sign < v.a[i] * sign;
return false;
@@ -169,7 +170,7 @@ struct bigint {
}
bool isZero() const {
- return a.empty() || (sz(a) == 1 && a[0] == 0);
+ return a.empty() || (ssize(a) == 1 && a[0] == 0);
}
bigint operator-() const {
@@ -186,7 +187,7 @@ struct bigint {
ll longValue() const {
ll res = 0;
- for (ll i = sz(a) - 1; i >= 0; i--)
+ for (ll i = ssize(a) - 1; i >= 0; i--)
res = res * base + a[i];
return res * sign;
}
@@ -195,11 +196,11 @@ struct bigint {
sign = 1;
a.clear();
ll pos = 0;
- while (pos < sz(s) && (s[pos] == '-' || s[pos] == '+')) {
+ while (pos < ssize(s) && (s[pos] == '-' || s[pos] == '+')) {
if (s[pos] == '-') sign = -sign;
++pos;
}
- for (ll i = sz(s) - 1; i >= pos; i -= base_digits) {
+ for (ll i = ssize(s) - 1; i >= pos; i -= base_digits) {
ll x = 0;
for (ll j = max(pos, i - base_digits + 1); j <= i; j++)
x = x * 10 + s[j] - '0';
@@ -218,13 +219,13 @@ struct bigint {
friend ostream& operator<<(ostream& stream, const bigint& v) {
if (v.sign == -1) stream << '-';
stream << (v.a.empty() ? 0 : v.a.back());
- for (ll i = sz(v.a) - 2; i >= 0; --i)
+ for (ll i = ssize(v.a) - 2; i >= 0; --i)
stream << setw(base_digits) << setfill('0') << v.a[i];
return stream;
}
static vll karatsubaMultiply(const vll& a, const vll& b) {
- ll n = sz(a);
+ ll n = ssize(a);
vll res(n + n);
if (n <= 32) {
for (ll i = 0; i < n; i++)
@@ -242,25 +243,25 @@ struct bigint {
for (ll i = 0; i < k; i++) a2[i] += a1[i];
for (ll i = 0; i < k; i++) b2[i] += b1[i];
vll r = karatsubaMultiply(a2, b2);
- for (ll i = 0; i < sz(a1b1); i++) r[i] -= a1b1[i];
- for (ll i = 0; i < sz(a2b2); i++) r[i] -= a2b2[i];
- for (ll i = 0; i < sz(r); i++) res[i + k] += r[i];
- for (ll i = 0; i < sz(a1b1); i++) res[i] += a1b1[i];
- for (ll i = 0; i < sz(a2b2); i++) res[i + n] += a2b2[i];
+ for (ll i = 0; i < ssize(a1b1); i++) r[i] -= a1b1[i];
+ for (ll i = 0; i < ssize(a2b2); i++) r[i] -= a2b2[i];
+ for (ll i = 0; i < ssize(r); i++) res[i + k] += r[i];
+ for (ll i = 0; i < ssize(a1b1); i++) res[i] += a1b1[i];
+ for (ll i = 0; i < ssize(a2b2); i++) res[i + n] += a2b2[i];
return res;
}
bigint operator*(const bigint& v) const {
vll ta(a.begin(), a.end());
vll va(v.a.begin(), v.a.end());
- while (sz(ta) < sz(va)) ta.push_back(0);
- while (sz(va) < sz(ta)) va.push_back(0);
- while (sz(ta) & (sz(ta) - 1))
+ while (ssize(ta) < ssize(va)) ta.push_back(0);
+ while (ssize(va) < ssize(ta)) va.push_back(0);
+ while (ssize(ta) & (ssize(ta) - 1))
ta.push_back(0), va.push_back(0);
vll ra = karatsubaMultiply(ta, va);
bigint res;
res.sign = sign * v.sign;
- for (ll i = 0, carry = 0; i < sz(ra); i++) {
+ for (ll i = 0, carry = 0; i < ssize(ra); i++) {
ll cur = ra[i] + carry;
res.a.push_back(cur % base);
carry = cur / base;
diff --git a/content/math/binomial0.cpp b/content/math/binomial0.cpp
index 5f2ccaa..f37aea5 100644
--- a/content/math/binomial0.cpp
+++ b/content/math/binomial0.cpp
@@ -10,5 +10,5 @@ void precalc() {
ll calc_binom(ll n, ll k) {
if (n < 0 || n < k || k < 0) return 0;
- return (inv[k] * inv[n-k] % mod) * fac[n] % mod;
+ return (fac[n] * inv[n-k] % mod) * inv[k] % mod;
}
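Editorial note, not part of the patch: once precalc() has filled fac[] and inv[], binomial coefficients are read off modulo mod.

    precalc();
    ll c = calc_binom(10, 3);   // == 120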
diff --git a/content/math/binomial1.cpp b/content/math/binomial1.cpp
index dab20b3..d0fce18 100644
--- a/content/math/binomial1.cpp
+++ b/content/math/binomial1.cpp
@@ -1,7 +1,7 @@
ll calc_binom(ll n, ll k) {
if (k > n) return 0;
ll r = 1;
- for (ll d = 1; d <= k; d++) {// Reihenfolge => Teilbarkeit
+ for (ll d = 1; d <= k; d++) { // Reihenfolge => Teilbarkeit
r *= n--, r /= d;
}
return r;
diff --git a/content/math/discreteLogarithm.cpp b/content/math/discreteLogarithm.cpp
index 68866e0..844bd27 100644
--- a/content/math/discreteLogarithm.cpp
+++ b/content/math/discreteLogarithm.cpp
@@ -5,11 +5,11 @@ ll dlog(ll a, ll b, ll m) { //a > 0!
vals[i] = {e, i};
}
vals.emplace_back(m, 0);
- sort(all(vals));
+ ranges::sort(vals);
ll fact = powMod(a, m - bound - 1, m);
for (ll i = 0; i < m; i += bound, b = (b * fact) % m) {
- auto it = lower_bound(all(vals), pair<ll, ll>{b, 0});
+ auto it = ranges::lower_bound(vals, pair<ll, ll>{b, 0});
if (it->first == b) {
return (i + it->second) % m;
}}
diff --git a/content/math/divisors.cpp b/content/math/divisors.cpp
index 5afd4fb..2a17f54 100644
--- a/content/math/divisors.cpp
+++ b/content/math/divisors.cpp
@@ -2,7 +2,7 @@ ll countDivisors(ll n) {
ll res = 1;
for (ll i = 2; i * i * i <= n; i++) {
ll c = 0;
- while (n % i == 0) {n /= i; c++;}
+ while (n % i == 0) { n /= i; c++; }
res *= c + 1;
}
if (isPrime(n)) res *= 2;
diff --git a/content/math/gauss.cpp b/content/math/gauss.cpp
index d431e52..719f573 100644
--- a/content/math/gauss.cpp
+++ b/content/math/gauss.cpp
@@ -7,7 +7,7 @@ void takeAll(int n, int line) {
for (int i = 0; i < n; i++) {
if (i == line) continue;
double diff = mat[i][line];
- for (int j = 0; j < sz(mat[i]); j++) {
+ for (int j = 0; j < ssize(mat[i]); j++) {
mat[i][j] -= diff * mat[line][j];
}}}
@@ -22,7 +22,7 @@ int gauss(int n) {
if (abs(mat[i][i]) > EPS) {
normalLine(i);
takeAll(n, i);
- done[i] = true;
+ done[i] = true;
}}
for (int i = 0; i < n; i++) { // gauss fertig, prüfe Lösung
bool allZero = true;
diff --git a/content/math/gcd-lcm.cpp b/content/math/gcd-lcm.cpp
index a1c63c8..1ee7ef5 100644
--- a/content/math/gcd-lcm.cpp
+++ b/content/math/gcd-lcm.cpp
@@ -1,2 +1,2 @@
-ll gcd(ll a, ll b) {return b == 0 ? a : gcd(b, a % b);}
-ll lcm(ll a, ll b) {return a * (b / gcd(a, b));}
+ll gcd(ll a, ll b) { return b == 0 ? a : gcd(b, a % b); }
+ll lcm(ll a, ll b) { return a * (b / gcd(a, b)); }
diff --git a/content/math/inversions.cpp b/content/math/inversions.cpp
index 9e47f9b..289161f 100644
--- a/content/math/inversions.cpp
+++ b/content/math/inversions.cpp
@@ -1,7 +1,7 @@
ll inversions(const vector<ll>& v) {
Tree<pair<ll, ll>> t; //ordered statistics tree @\sourceref{datastructures/pbds.cpp}@
ll res = 0;
- for (ll i = 0; i < sz(v); i++) {
+ for (ll i = 0; i < ssize(v); i++) {
res += i - t.order_of_key({v[i], i});
t.insert({v[i], i});
}
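Editorial note, not part of the patch: a usage sketch; needs the pbds Tree<> from the referenced snippet.

    vector<ll> v = {3, 1, 4, 2};
    ll inv = inversions(v);   // == 3: the pairs (3,1), (3,2), (4,2)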
diff --git a/content/math/inversionsMerge.cpp b/content/math/inversionsMerge.cpp
index 8235b11..50fe37b 100644
--- a/content/math/inversionsMerge.cpp
+++ b/content/math/inversionsMerge.cpp
@@ -2,26 +2,26 @@
ll merge(vector<ll>& v, vector<ll>& left, vector<ll>& right) {
int a = 0, b = 0, i = 0;
ll inv = 0;
- while (a < sz(left) && b < sz(right)) {
+ while (a < ssize(left) && b < ssize(right)) {
if (left[a] < right[b]) v[i++] = left[a++];
else {
- inv += sz(left) - a;
+ inv += ssize(left) - a;
v[i++] = right[b++];
}
}
- while (a < sz(left)) v[i++] = left[a++];
- while (b < sz(right)) v[i++] = right[b++];
+ while (a < ssize(left)) v[i++] = left[a++];
+ while (b < ssize(right)) v[i++] = right[b++];
return inv;
}
ll mergeSort(vector<ll> &v) { // Sortiert v und gibt Inversionszahl zurück.
- int n = sz(v);
+ int n = ssize(v);
vector<ll> left(n / 2), right((n + 1) / 2);
for (int i = 0; i < n / 2; i++) left[i] = v[i];
for (int i = n / 2; i < n; i++) right[i - n / 2] = v[i];
ll result = 0;
- if (sz(left) > 1) result += mergeSort(left);
- if (sz(right) > 1) result += mergeSort(right);
+ if (ssize(left) > 1) result += mergeSort(left);
+ if (ssize(right) > 1) result += mergeSort(right);
return result + merge(v, left, right);
}
diff --git a/content/math/lgsFp.cpp b/content/math/lgsFp.cpp
index bf18c86..64e4c09 100644
--- a/content/math/lgsFp.cpp
+++ b/content/math/lgsFp.cpp
@@ -7,7 +7,7 @@ void takeAll(int n, int line, ll p) {
for (int i = 0; i < n; i++) {
if (i == line) continue;
ll diff = mat[i][line];
- for (int j = 0; j < sz(mat[i]); j++) {
+ for (int j = 0; j < ssize(mat[i]); j++) {
mat[i][j] -= (diff * mat[line][j]) % p;
mat[i][j] = (mat[i][j] + p) % p;
}}}
diff --git a/content/math/linearRecurrence.cpp b/content/math/linearRecurrence.cpp
index a8adacd..eb04566 100644
--- a/content/math/linearRecurrence.cpp
+++ b/content/math/linearRecurrence.cpp
@@ -1,9 +1,9 @@
constexpr ll mod = 998244353;
// oder ntt mul @\sourceref{math/transforms/ntt.cpp}@
vector<ll> mul(const vector<ll>& a, const vector<ll>& b) {
- vector<ll> c(sz(a) + sz(b) - 1);
- for (int i = 0; i < sz(a); i++) {
- for (int j = 0; j < sz(b); j++) {
+ vector<ll> c(ssize(a) + ssize(b) - 1);
+ for (int i = 0; i < ssize(a); i++) {
+ for (int j = 0; j < ssize(b); j++) {
c[i+j] += a[i]*b[j] % mod;
}}
for (ll& x : c) x %= mod;
@@ -11,7 +11,7 @@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) {
}
ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) {
- int n = sz(c);
+ int n = ssize(c);
vector<ll> q(n + 1, 1);
for (int i = 0; i < n; i++) q[i + 1] = (mod - c[i]) % mod;
vector<ll> p = mul(f, q);
diff --git a/content/math/linearRecurrenceOld.cpp b/content/math/linearRecurrenceOld.cpp
index 2501e64..f67398d 100644
--- a/content/math/linearRecurrenceOld.cpp
+++ b/content/math/linearRecurrenceOld.cpp
@@ -1,7 +1,7 @@
constexpr ll mod = 1'000'000'007;
vector<ll> modMul(const vector<ll>& a, const vector<ll>& b,
const vector<ll>& c) {
- ll n = sz(c);
+ ll n = ssize(c);
vector<ll> res(n * 2 + 1);
for (int i = 0; i <= n; i++) { //a*b
for (int j = 0; j <= n; j++) {
@@ -18,8 +18,8 @@ vector<ll> modMul(const vector<ll>& a, const vector<ll>& b,
}
ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) {
- assert(sz(f) == sz(c));
- vector<ll> tmp(sz(c) + 1), a(sz(c) + 1);
+ assert(ssize(f) == ssize(c));
+ vector<ll> tmp(ssize(c) + 1), a(ssize(c) + 1);
tmp[0] = a[1] = 1; //tmp = (x^k) % c
for (k++; k > 0; k /= 2) {
@@ -28,6 +28,6 @@ ll kthTerm(const vector<ll>& f, const vector<ll>& c, ll k) {
}
ll res = 0;
- for (int i = 0; i < sz(c); i++) res += (tmp[i+1] * f[i]) % mod;
+ for (int i = 0; i < ssize(c); i++) res += (tmp[i+1] * f[i]) % mod;
return res % mod;
}
diff --git a/content/math/linearSieve.cpp b/content/math/linearSieve.cpp
index 64440dd..2ea1e94 100644
--- a/content/math/linearSieve.cpp
+++ b/content/math/linearSieve.cpp
@@ -3,12 +3,12 @@ ll small[N], power[N], sieved[N];
vector<ll> primes;
//wird aufgerufen mit (p^k, p, k) für prime p und k > 0
-ll mu(ll pk, ll p, ll k) {return -(k == 1);}
-ll phi(ll pk, ll p, ll k) {return pk - pk / p;}
-ll div(ll pk, ll p, ll k) {return k+1;}
-ll divSum(ll pk, ll p, ll k) {return (pk*p-1) / (p - 1);}
-ll square(ll pk, ll p, ll k) {return k % 2 ? pk / p : pk;}
-ll squareFree(ll pk, ll p, ll k) {return p;}
+ll mu(ll pk, ll p, ll k) { return -(k == 1); }
+ll phi(ll pk, ll p, ll k) { return pk - pk / p; }
+ll div(ll pk, ll p, ll k) { return k+1; }
+ll divSum(ll pk, ll p, ll k) { return (pk*p-1) / (p - 1); }
+ll square(ll pk, ll p, ll k) { return k % 2 ? pk / p : pk; }
+ll squareFree(ll pk, ll p, ll k) { return p; }
void sieve() { // O(N)
small[1] = power[1] = sieved[1] = 1;
diff --git a/content/math/longestIncreasingSubsequence.cpp b/content/math/longestIncreasingSubsequence.cpp
index fcb63b4..e4863d0 100644
--- a/content/math/longestIncreasingSubsequence.cpp
+++ b/content/math/longestIncreasingSubsequence.cpp
@@ -1,8 +1,8 @@
vector<int> lis(vector<ll>& a) {
- int n = sz(a), len = 0;
+ int n = ssize(a), len = 0;
vector<ll> dp(n, INF), dp_id(n), prev(n);
for (int i = 0; i < n; i++) {
- int pos = lower_bound(all(dp), a[i]) - dp.begin();
+ int pos = ranges::lower_bound(dp, a[i]) - begin(dp);
dp[pos] = a[i];
dp_id[pos] = i;
prev[i] = pos ? dp_id[pos - 1] : -1;
diff --git a/content/math/math.tex b/content/math/math.tex
index 4ac6c9e..fdf7081 100644
--- a/content/math/math.tex
+++ b/content/math/math.tex
@@ -26,7 +26,7 @@
\end{methods}
\sourcecode{math/permIndex.cpp}
\end{algorithm}
-\clearpage
+\columnbreak
\subsection{Mod-Exponent und Multiplikation über $\boldsymbol{\mathbb{F}_p}$}
%\vspace{-1.25em}
@@ -100,8 +100,8 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
wenn $a\equiv~b \bmod \ggT(m, n)$.
In diesem Fall sind keine Faktoren
auf der linken Seite erlaubt.
- \end{itemize}
- \sourcecode{math/chineseRemainder.cpp}
+ \end{itemize}
+ \sourcecode{math/chineseRemainder.cpp}
\end{algorithm}
\begin{algorithm}{Primzahltest \& Faktorisierung}
@@ -121,7 +121,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
\begin{algorithm}{Matrix-Exponent}
\begin{methods}
\method{precalc}{berechnet $m^{2^b}$ vor}{\log(b)\*n^3}
- \method{calc}{berechnet $m^b\cdot$}{\log(b)\cdot n^2}
+ \method{calc}{berechnet $m^b \cdot v$}{\log(b)\cdot n^2}
\end{methods}
\textbf{Tipp:} wenn \code{v[x]=1} und \code{0} sonst, dann ist \code{res[y]} = $m^b_{y,x}$.
\sourcecode{math/matrixPower.cpp}
@@ -236,7 +236,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
\sourcecode{math/legendre.cpp}
\end{algorithm}
-\begin{algorithm}{Lineares Sieb und Multiplikative Funktionen}
+\begin{algorithm}{Lineares Sieb und multiplikative Funktionen}
Eine (zahlentheoretische) Funktion $f$ heißt multiplikativ wenn $f(1)=1$ und $f(a\cdot b)=f(a)\cdot f(b)$, falls $\ggT(a,b)=1$.
$\Rightarrow$ Es ist ausreichend $f(p^k)$ für alle primen $p$ und alle $k$ zu kennen.
@@ -250,7 +250,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
\textbf{Wichtig:} Sieb rechts ist schneller für \code{isPrime} oder \code{primes}!
\sourcecode{math/linearSieve.cpp}
- \textbf{\textsc{Möbius}-Funktion:}
+ \textbf{\textsc{Möbius} Funktion:}
\begin{itemize}
\item $\mu(n)=+1$, falls $n$ quadratfrei ist und gerade viele Primteiler hat
\item $\mu(n)=-1$, falls $n$ quadratfrei ist und ungerade viele Primteiler hat
@@ -263,7 +263,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
\item $p$ prim, $k \in \mathbb{N}$:
$~\varphi(p^k) = p^k - p^{k - 1}$
- \item \textbf{Euler's Theorem:}
+ \item \textbf{\textsc{Euler}'s Theorem:}
Für $b \geq \varphi(c)$ gilt: $a^b \equiv a^{b \bmod \varphi(c) + \varphi(c)} \pmod{c}$. Darüber hinaus gilt: $\gcd(a, c) = 1 \Leftrightarrow a^b \equiv a^{b \bmod \varphi(c)} \pmod{c}$.
Falls $m$ prim ist, liefert das den \textbf{kleinen Satz von \textsc{Fermat}}:
$a^{m} \equiv a \pmod{m}$
@@ -321,6 +321,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
\end{algorithm}
\begin{algorithm}{Polynome, FFT, NTT \& andere Transformationen}
+ \label{fft}
Multipliziert Polynome $A$ und $B$.
\begin{itemize}
\item $\deg(A \cdot B) = \deg(A) + \deg(B)$
@@ -328,14 +329,15 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
$\deg(A \cdot B) + 1$ haben.
Größe muss eine Zweierpotenz sein.
\item Für ganzzahlige Koeffizienten: \code{(ll)round(real(a[i]))}
- \item \emph{xor}, \emph{or} und \emph{and} Transform funktioniert auch mit \code{double} oder modulo einer Primzahl $p$ falls $p \geq 2^{\texttt{bits}}$
+ \item \emph{or} Transform berechnet sum over subsets
+ $\rightarrow$ inverse für inclusion/exclusion
\end{itemize}
%\sourcecode{math/fft.cpp}
%\sourcecode{math/ntt.cpp}
\sourcecode{math/transforms/fft.cpp}
\sourcecode{math/transforms/ntt.cpp}
\sourcecode{math/transforms/bitwiseTransforms.cpp}
- Multiplikation mit 2 transforms statt 3: (nur benutzten wenn nötig!)
+ Multiplikation mit 2 Transforms statt 3: (nur benutzen wenn nötig!)
\sourcecode{math/transforms/fftMul.cpp}
\end{algorithm}
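Since sum over subsets is now referred to the or transform above, a minimal self-contained sketch of what that forward pass and its inverse compute may help; sosTransform below is an illustrative stand-in written for this note, not the library's bitwiseConv:

#include <bits/stdc++.h>
using namespace std;
using ll = long long;

// Forward pass: a[mask] becomes the sum over all submasks of mask (zeta transform).
// Inverse pass: undoes it via inclusion/exclusion (Moebius transform).
void sosTransform(vector<ll>& a, bool inv = false) {
    int n = ssize(a);                      // size must be a power of two
    for (int s = 1; s < n; s *= 2)
        for (int mask = 0; mask < n; mask++)
            if (mask & s) a[mask] += (inv ? -1 : 1) * a[mask ^ s];
}

int main() {
    vector<ll> a = {1, 2, 3, 4};           // values indexed by bitmask over 2 bits
    sosTransform(a);                       // {1, 3, 4, 10}, e.g. a[3] = 1+2+3+4
    sosTransform(a, true);                 // back to {1, 2, 3, 4}
    for (ll x : a) cout << x << ' ';       // prints: 1 2 3 4
    cout << '\n';
}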
@@ -345,7 +347,7 @@ sich alle Lösungen von $x^2-ny^2=c$ berechnen durch:
\subsection{Kombinatorik}
-\paragraph{Wilsons Theorem}
+\paragraph{\textsc{Wilson}'s Theorem}
A number $n$ is prime if and only if
$(n-1)!\equiv -1\bmod{n}$.\\
($n$ is prime if and only if $(m-1)!\cdot(n-m)!\equiv(-1)^m\bmod{n}$ for all $m$ in $\{1,\dots,n\}$)
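For example, $4! = 24 \equiv -1 \pmod{5}$, while for the composite $6$ one gets $5! = 120 \equiv 0 \not\equiv -1 \pmod{6}$.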
@@ -357,14 +359,14 @@ $(n-1)!\equiv -1\bmod{n}$.\\
\end{cases}
\end{align*}
-\paragraph{\textsc{Zeckendorfs} Theorem}
+\paragraph{\textsc{Zeckendorf}'s Theorem}
Jede positive natürliche Zahl kann eindeutig als Summe einer oder mehrerer
verschiedener \textsc{Fibonacci}-Zahlen geschrieben werden, sodass keine zwei
aufeinanderfolgenden \textsc{Fibonacci}-Zahlen in der Summe vorkommen.\\
\emph{Lösung:} Greedy, nimm immer die größte \textsc{Fibonacci}-Zahl, die noch
hineinpasst.
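For example, the greedy choice gives $100 = 89 + 8 + 3$, and no two of the chosen \textsc{Fibonacci} numbers are consecutive.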
-\paragraph{\textsc{Lucas}-Theorem}
+\paragraph{\textsc{Lucas}'s Theorem}
Ist $p$ prim, $m=\sum_{i=0}^km_ip^i$, $n=\sum_{i=0}^kn_ip^i$ ($p$-adische Darstellung),
so gilt
\vspace{-0.75\baselineskip}
@@ -542,10 +544,10 @@ Wenn man $k$ Spiele in den Zuständen $X_1, \ldots, X_k$ hat, dann ist die \text
\input{math/tables/series}
\subsection{Wichtige Zahlen}
-\input{math/tables/composite}
+\input{math/tables/prime-composite}
-\subsection{Recover $\boldsymbol{x}$ and $\boldsymbol{y}$ from $\boldsymbol{y}$ from $\boldsymbol{x\*y^{-1}}$ }
-\method{recover}{findet $x$ und $y$ für $x=x\*y^{-1}\bmod m$}{\log(m)}
+\subsection{Recover $\boldsymbol{x}$ and $\boldsymbol{y}$ from $\boldsymbol{x\*y^{-1}}$ }
+\method{recover}{findet $x$ und $y$ für $c=x\*y^{-1}\bmod m$}{\log(m)}
\textbf{WICHTIG:} $x$ und $y$ müssen kleiner als $\sqrt{\nicefrac{m}{2}}$ sein!
\sourcecode{math/recover.cpp}
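Illustrative check (numbers chosen only for this example): with $m = 101$, $x = 3$, $y = 7$ one has $y^{-1} \equiv 29 \pmod{101}$, so $c = 3 \cdot 29 \bmod 101 = 87$; since $3, 7 < \sqrt{\nicefrac{101}{2}} \approx 7.1$, \code{recover(87, 101)} should return $x = 3$ and $y = 7$.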
diff --git a/content/math/matrixPower.cpp b/content/math/matrixPower.cpp
index d981e6e..d80dac6 100644
--- a/content/math/matrixPower.cpp
+++ b/content/math/matrixPower.cpp
@@ -1,14 +1,14 @@
vector<mat> pows;
void precalc(mat m) {
- pows = {mat(sz(m.m), 1), m};
- for (int i = 1; i < 60; i++) pows.push_back(pows[i] * pows[i]);
+ pows = {m};
+ for (int i = 0; i < 60; i++) pows.push_back(pows[i] * pows[i]);
}
auto calc(ll b, vector<ll> v) {
- for (ll i = 1; b > 0; i++) {
+ for (ll i = 0; b > 0; i++) {
if (b & 1) v = pows[i] * v;
- b /= 2;
+ b >>= 1;
}
return v;
}
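For context, here is a self-contained illustration of the same square-and-multiply scheme that the updated precalc/calc implement, using hypothetical fixed 2x2 helpers (mat2, mul, fib) instead of the library's mat type:

#include <bits/stdc++.h>
using namespace std;
using ll = long long;
using mat2 = array<array<ll, 2>, 2>;
constexpr ll MOD = 1'000'000'007;

mat2 mul(const mat2& a, const mat2& b) {   // 2x2 matrix product mod MOD
    mat2 c{};
    for (int i = 0; i < 2; i++)
        for (int k = 0; k < 2; k++)
            for (int j = 0; j < 2; j++)
                c[i][j] = (c[i][j] + a[i][k] * b[k][j]) % MOD;
    return c;
}

ll fib(ll n) {                             // F(0) = 0, F(1) = 1
    mat2 res = {{{1, 0}, {0, 1}}};         // identity
    mat2 p = {{{1, 1}, {1, 0}}};           // Fibonacci step matrix M
    while (n > 0) {
        if (n & 1) res = mul(res, p);      // use M^(2^i) if bit i of n is set
        p = mul(p, p);                     // p runs through M^(2^i), like pows[i]
        n >>= 1;
    }
    return res[0][1];                      // (M^n)[0][1] = F(n)
}

int main() { cout << fib(10) << '\n'; }    // prints 55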
diff --git a/content/math/permIndex.cpp b/content/math/permIndex.cpp
index 4cffc12..563b33a 100644
--- a/content/math/permIndex.cpp
+++ b/content/math/permIndex.cpp
@@ -1,12 +1,12 @@
ll permIndex(vector<ll> v) {
Tree<ll> t;
- reverse(all(v));
+ ranges::reverse(v);
for (ll& x : v) {
t.insert(x);
x = t.order_of_key(x);
}
ll res = 0;
- for (int i = sz(v); i > 0; i--) {
+ for (int i = ssize(v); i > 0; i--) {
res = res * i + v[i - 1];
}
return res;
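A quick sanity check of the updated code: for v = {2, 0, 1} (a permutation of {0, 1, 2}) the counts of smaller elements to the right are 2, 0, 0, so permIndex returns 2*2! + 0*1! + 0*0! = 4, i.e. (2, 0, 1) is the fifth permutation in lexicographic order.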
diff --git a/content/math/piLegendre.cpp b/content/math/piLegendre.cpp
index 21b974b..6401a4f 100644
--- a/content/math/piLegendre.cpp
+++ b/content/math/piLegendre.cpp
@@ -1,23 +1,23 @@
-constexpr ll cache = 500; // requires O(cache^3)
-vector<vector<ll>> memo(cache * cache, vector<ll>(cache));
-
-ll pi(ll n);
-
-ll phi(ll n, ll k) {
- if (n <= 1 || k < 0) return 0;
- if (n <= primes[k]) return n - 1;
- if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k;
- bool ok = n < cache * cache;
- if (ok && memo[n][k] > 0) return memo[n][k];
- ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1);
- if (ok) memo[n][k] = res;
- return res;
-}
-
-ll pi(ll n) {
- if (n < N) { // implement this as O(1) lookup for speedup!
- return distance(primes.begin(), upper_bound(all(primes), n));
- } else {
- ll k = pi(sqrtl(n) + 1);
- return n - phi(n, k) + k;
-}}
+constexpr ll cache = 500; // requires O(cache^3)
+vector<vector<ll>> memo(cache * cache, vector<ll>(cache));
+
+ll pi(ll n);
+
+ll phi(ll n, ll k) {
+ if (n <= 1 || k < 0) return 0;
+ if (n <= primes[k]) return n - 1;
+ if (n < N && primes[k] * primes[k] > n) return n - pi(n) + k;
+ bool ok = n < cache * cache;
+ if (ok && memo[n][k] > 0) return memo[n][k];
+ ll res = n/primes[k] - phi(n/primes[k], k - 1) + phi(n, k - 1);
+ if (ok) memo[n][k] = res;
+ return res;
+}
+
+ll pi(ll n) {
+ if (n < N) { // implement this as O(1) lookup for speedup!
+ return ranges::upper_bound(primes, n) - begin(primes);
+ } else {
+ ll k = pi(sqrtl(n) + 1);
+ return n - phi(n, k) + k;
+}}
diff --git a/content/math/polynomial.cpp b/content/math/polynomial.cpp
index 84f3aaa..12a4fd7 100644
--- a/content/math/polynomial.cpp
+++ b/content/math/polynomial.cpp
@@ -4,15 +4,15 @@ struct poly {
poly(int deg = 0) : data(1 + deg) {}
poly(initializer_list<ll> _data) : data(_data) {}
- int size() const {return sz(data);}
+ int size() const { return ssize(data); }
void trim() {
for (ll& x : data) x = (x % mod + mod) % mod;
while (size() > 1 && data.back() == 0) data.pop_back();
}
- ll& operator[](int x) {return data[x];}
- const ll& operator[](int x) const {return data[x];}
+ ll& operator[](int x) { return data[x]; }
+ const ll& operator[](int x) const { return data[x]; }
ll operator()(int x) const {
ll res = 0;
diff --git a/content/math/primeSieve.cpp b/content/math/primeSieve.cpp
index 1b0f514..2b2bf26 100644
--- a/content/math/primeSieve.cpp
+++ b/content/math/primeSieve.cpp
@@ -8,7 +8,7 @@ bool isPrime(ll x) {
}
void primeSieve() {
- for (ll i = 3; i < N; i += 2) {// i * i < N reicht für isPrime
+ for (ll i = 3; i < N; i += 2) { // i * i < N reicht für isPrime
if (!isNotPrime[i / 2]) {
primes.push_back(i); // optional
for (ll j = i * i; j < N; j+= 2 * i) {
diff --git a/content/math/recover.cpp b/content/math/recover.cpp
index 1a593f0..a4c22aa 100644
--- a/content/math/recover.cpp
+++ b/content/math/recover.cpp
@@ -1,4 +1,4 @@
-ll sq(ll x) {return x*x;}
+ll sq(ll x) { return x*x; }
array<ll, 2> recover(ll c, ll m) {
array<ll, 2> u = {m, 0}, v = {c, 1};
diff --git a/content/math/rho.cpp b/content/math/rho.cpp
index ad640cd..c7f7a70 100644
--- a/content/math/rho.cpp
+++ b/content/math/rho.cpp
@@ -2,7 +2,7 @@ using lll = __int128;
ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim.
if (n % 2 == 0) return 2;
ll x = 0, y = 0, prd = 2, i = n/2 + 7;
- auto f = [&](lll c){return (c * c + i) % n;};
+ auto f = [&](lll c) { return (c * c + i) % n; };
for (ll t = 30; t % 40 || gcd(prd, n) == 1; t++) {
if (x == y) x = ++i, y = f(x);
if (ll q = (lll)prd * abs(x-y) % n; q) prd = q;
@@ -13,7 +13,7 @@ ll rho(ll n) { // Findet Faktor < n, nicht unbedingt prim.
void factor(ll n, map<ll, int>& facts) {
if (n == 1) return;
- if (isPrime(n)) {facts[n]++; return;}
+ if (isPrime(n)) { facts[n]++; return; }
ll f = rho(n);
factor(n / f, facts); factor(f, facts);
}
diff --git a/content/math/shortModInv.cpp b/content/math/shortModInv.cpp
index cf91ca0..7d3002c 100644
--- a/content/math/shortModInv.cpp
+++ b/content/math/shortModInv.cpp
@@ -1,3 +1,3 @@
ll multInv(ll x, ll m) { // x^{-1} mod m
- return 1 < x ? m - multInv(m % x, x) * m / x : 1;
+ return 1 < (x %= m) ? m - multInv(m, x) * m / x : 1;
}
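A small worked check of the new version: multInv(5, 7) recurses through multInv(7, 5) = 3 and multInv(5, 2) = 1 and returns 3, and indeed 5 * 3 = 15 ≡ 1 (mod 7); the added reduction x %= m also makes the call well-defined for x >= m, e.g. multInv(9, 7) now yields 4.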
diff --git a/content/math/simpson.cpp b/content/math/simpson.cpp
index 7f237a4..da9c002 100644
--- a/content/math/simpson.cpp
+++ b/content/math/simpson.cpp
@@ -1,4 +1,4 @@
-//double f(double x) {return x;}
+//double f(double x) { return x; }
double simps(double a, double b) {
return (f(a) + 4.0 * f((a + b) / 2.0) + f(b)) * (b - a) / 6.0;
diff --git a/content/math/sqrtModCipolla.cpp b/content/math/sqrtModCipolla.cpp
index 1fac0c5..c062646 100644
--- a/content/math/sqrtModCipolla.cpp
+++ b/content/math/sqrtModCipolla.cpp
@@ -1,4 +1,4 @@
-ll sqrtMod(ll a, ll p) {// teste mit legendre ob lösung existiert
+ll sqrtMod(ll a, ll p) {// teste mit Legendre ob Lösung existiert
if (a < 2) return a;
ll t = 0;
while (legendre((t*t-4*a) % p, p) >= 0) t = rng() % p;
diff --git a/content/math/tables/composite.tex b/content/math/tables/composite.tex
deleted file mode 100644
index 7a6ab09..0000000
--- a/content/math/tables/composite.tex
+++ /dev/null
@@ -1,26 +0,0 @@
-\begin{expandtable}
-\begin{tabularx}{\linewidth}{|r||r|R||r||r|}
- \hline
- $10^x$ & Highly Composite & \# Divs & \# prime Divs & \# Primes \\
- \hline
- 1 & 6 & 4 & 2 & 4 \\
- 2 & 60 & 12 & 3 & 25 \\
- 3 & 840 & 32 & 4 & 168 \\
- 4 & 7\,560 & 64 & 5 & 1\,229 \\
- 5 & 83\,160 & 128 & 6 & 9\,592 \\
- 6 & 720\,720 & 240 & 7 & 78\,498 \\
- 7 & 8\,648\,640 & 448 & 8 & 664\,579 \\
- 8 & 73\,513\,440 & 768 & 8 & 5\,761\,455 \\
- 9 & 735\,134\,400 & 1\,344 & 9 & 50\,847\,534 \\
- 10 & 6\,983\,776\,800 & 2\,304 & 10 & 455\,052\,511 \\
- 11 & 97\,772\,875\,200 & 4\,032 & 10 & 4\,118\,054\,813 \\
- 12 & 963\,761\,198\,400 & 6\,720 & 11 & 37\,607\,912\,018 \\
- 13 & 9\,316\,358\,251\,200 & 10\,752 & 12 & 346\,065\,536\,839 \\
- 14 & 97\,821\,761\,637\,600 & 17\,280 & 12 & 3\,204\,941\,750\,802 \\
- 15 & 866\,421\,317\,361\,600 & 26\,880 & 13 & 29\,844\,570\,422\,669 \\
- 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & 13 & 279\,238\,341\,033\,925 \\
- 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & 14 & 2\,623\,557\,157\,654\,233 \\
- 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & 16 & 24\,739\,954\,287\,740\,860 \\
- \hline
-\end{tabularx}
-\end{expandtable}
diff --git a/content/math/tables/prime-composite.tex b/content/math/tables/prime-composite.tex
new file mode 100644
index 0000000..b8adadf
--- /dev/null
+++ b/content/math/tables/prime-composite.tex
@@ -0,0 +1,31 @@
+\begin{expandtable}
+\begin{tabularx}{\linewidth}{|r|rIr|rIr|r|R|}
+ \hline
+ \multirow{2}{*}{$10^x$}
+ & \multirow{2}{*}{Highly Composite}
+ & \multirow{2}{*}{\# Divs}
+ & \multicolumn{2}{c|}{Prime}
+ & \multirow{2}{*}{\# Primes} & \# Prime \\
+ & & & $<$ & $>$ & & Factors \\
+ \hline
+ 1 & 6 & 4 & $-3$ & $+1$ & 4 & 2 \\
+ 2 & 60 & 12 & $-3$ & $+1$ & 25 & 3 \\
+ 3 & 840 & 32 & $-3$ & $+9$ & 168 & 4 \\
+ 4 & 7\,560 & 64 & $-27$ & $+7$ & 1\,229 & 5 \\
+ 5 & 83\,160 & 128 & $-9$ & $+3$ & 9\,592 & 6 \\
+ 6 & 720\,720 & 240 & $-17$ & $+3$ & 78\,498 & 7 \\
+ 7 & 8\,648\,640 & 448 & $-9$ & $+19$ & 664\,579 & 8 \\
+ 8 & 73\,513\,440 & 768 & $-11$ & $+7$ & 5\,761\,455 & 8 \\
+ 9 & 735\,134\,400 & 1\,344 & $-63$ & $+7$ & 50\,847\,534 & 9 \\
+ 10 & 6\,983\,776\,800 & 2\,304 & $-33$ & $+19$ & 455\,052\,511 & 10 \\
+ 11 & 97\,772\,875\,200 & 4\,032 & $-23$ & $+3$ & 4\,118\,054\,813 & 10 \\
+ 12 & 963\,761\,198\,400 & 6\,720 & $-11$ & $+39$ & 37\,607\,912\,018 & 11 \\
+ 13 & 9\,316\,358\,251\,200 & 10\,752 & $-29$ & $+37$ & 346\,065\,536\,839 & 12 \\
+ 14 & 97\,821\,761\,637\,600 & 17\,280 & $-27$ & $+31$ & 3\,204\,941\,750\,802 & 12 \\
+ 15 & 866\,421\,317\,361\,600 & 26\,880 & $-11$ & $+37$ & 29\,844\,570\,422\,669 & 13 \\
+ 16 & 8\,086\,598\,962\,041\,600 & 41\,472 & $-63$ & $+61$ & 279\,238\,341\,033\,925 & 13 \\
+ 17 & 74\,801\,040\,398\,884\,800 & 64\,512 & $-3$ & $+3$ & 2\,623\,557\,157\,654\,233 & 14 \\
+ 18 & 897\,612\,484\,786\,617\,600 & 103\,680 & $-11$ & $+3$ & 24\,739\,954\,287\,740\,860 & 15 \\
+ \hline
+\end{tabularx}
+\end{expandtable}
diff --git a/content/math/transforms/andTransform.cpp b/content/math/transforms/andTransform.cpp
index 1fd9f5c..9e40c74 100644
--- a/content/math/transforms/andTransform.cpp
+++ b/content/math/transforms/andTransform.cpp
@@ -1,8 +1,8 @@
void fft(vector<ll>& a, bool inv = false) {
- int n = sz(a);
+ int n = ssize(a);
for (int s = 1; s < n; s *= 2) {
for (int i = 0; i < n; i += 2 * s) {
for (int j = i; j < i + s; j++) {
ll& u = a[j], &v = a[j + s];
- tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v);
+ tie(u, v) = inv ? pair(u - v, v) : pair(u + v, v);
}}}}
diff --git a/content/math/transforms/bitwiseTransforms.cpp b/content/math/transforms/bitwiseTransforms.cpp
index 28561da..17f3163 100644
--- a/content/math/transforms/bitwiseTransforms.cpp
+++ b/content/math/transforms/bitwiseTransforms.cpp
@@ -1,11 +1,11 @@
void bitwiseConv(vector<ll>& a, bool inv = false) {
- int n = sz(a);
+ int n = ssize(a);
for (int s = 1; s < n; s *= 2) {
for (int i = 0; i < n; i += 2 * s) {
for (int j = i; j < i + s; j++) {
ll& u = a[j], &v = a[j + s];
- tie(u, v) = inv ? pair(v - u, u) : pair(v, u + v); // AND
- //tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u); //OR
+ tie(u, v) = inv ? pair(u - v, v) : pair(u + v, v); // AND
+ //tie(u, v) = inv ? pair(u, v - u) : pair(u, v + u); //OR
//tie(u, v) = pair(u + v, u - v); // XOR
}}}
//if (inv) for (ll& x : a) x /= n; // XOR (careful with MOD)
diff --git a/content/math/transforms/fft.cpp b/content/math/transforms/fft.cpp
index 2bd95b2..1f80e36 100644
--- a/content/math/transforms/fft.cpp
+++ b/content/math/transforms/fft.cpp
@@ -1,7 +1,7 @@
using cplx = complex<double>;
void fft(vector<cplx>& a, bool inv = false) {
- int n = sz(a);
+ int n = ssize(a);
for (int i = 0, j = 1; j < n - 1; ++j) {
for (int k = n >> 1; k > (i ^= k); k >>= 1);
if (j < i) swap(a[i], a[j]);
diff --git a/content/math/transforms/fftMul.cpp b/content/math/transforms/fftMul.cpp
index 660ed79..da6a538 100644
--- a/content/math/transforms/fftMul.cpp
+++ b/content/math/transforms/fftMul.cpp
@@ -1,8 +1,8 @@
vector<cplx> mul(vector<ll>& a, vector<ll>& b) {
- int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1);
- vector<cplx> c(all(a)), d(n);
+ int n = 1 << (__lg(ssize(a) + ssize(b) - 1) + 1);
+ vector<cplx> c(begin(a), end(a)), d(n);
c.resize(n);
- for (int i = 0; i < sz(b); i++) c[i] = {real(c[i]), b[i]};
+ for (int i = 0; i < ssize(b); i++) c[i] = {real(c[i]), b[i]};
fft(c);
for (int i = 0; i < n; i++) {
int j = (n - i) & (n - 1);
diff --git a/content/math/transforms/multiplyBitwise.cpp b/content/math/transforms/multiplyBitwise.cpp
index f7cf169..5275b8c 100644
--- a/content/math/transforms/multiplyBitwise.cpp
+++ b/content/math/transforms/multiplyBitwise.cpp
@@ -1,5 +1,5 @@
vector<ll> mul(vector<ll> a, vector<ll> b) {
- int n = 1 << (__lg(2 * max(sz(a), sz(b)) - 1));
+ int n = 1 << (__lg(2 * max(ssize(a), ssize(b)) - 1));
a.resize(n), b.resize(n);
bitwiseConv(a), bitwiseConv(b);
for (int i=0; i<n; i++) a[i] *= b[i]; // MOD?
diff --git a/content/math/transforms/multiplyFFT.cpp b/content/math/transforms/multiplyFFT.cpp
index 0022d1f..963be94 100644
--- a/content/math/transforms/multiplyFFT.cpp
+++ b/content/math/transforms/multiplyFFT.cpp
@@ -1,6 +1,6 @@
vector<ll> mul(vector<ll>& a, vector<ll>& b) {
- int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1);
- vector<cplx> a2(all(a)), b2(all(b));
+ int n = 1 << (__lg(ssize(a) + ssize(b) - 1) + 1);
+ vector<cplx> a2(begin(a), end(a)), b2(begin(b), end(b));
a2.resize(n), b2.resize(n);
fft(a2), fft(b2);
for (int i=0; i<n; i++) a2[i] *= b2[i];
diff --git a/content/math/transforms/multiplyNTT.cpp b/content/math/transforms/multiplyNTT.cpp
index 806d124..d234ce3 100644
--- a/content/math/transforms/multiplyNTT.cpp
+++ b/content/math/transforms/multiplyNTT.cpp
@@ -1,5 +1,5 @@
vector<ll> mul(vector<ll> a, vector<ll> b) {
- int n = 1 << (__lg(sz(a) + sz(b) - 1) + 1);
+ int n = 1 << bit_width(size(a) + size(b) - 1);
a.resize(n), b.resize(n);
ntt(a), ntt(b);
for (int i=0; i<n; i++) a[i] = a[i] * b[i] % mod;
diff --git a/content/math/transforms/ntt.cpp b/content/math/transforms/ntt.cpp
index ca605d3..fc7874e 100644
--- a/content/math/transforms/ntt.cpp
+++ b/content/math/transforms/ntt.cpp
@@ -1,7 +1,7 @@
constexpr ll mod = 998244353, root = 3;
void ntt(vector<ll>& a, bool inv = false) {
- int n = sz(a);
+ int n = ssize(a);
auto b = a;
ll r = inv ? powMod(root, mod - 2, mod) : root;
diff --git a/content/math/transforms/orTransform.cpp b/content/math/transforms/orTransform.cpp
index eb1da44..6503a68 100644
--- a/content/math/transforms/orTransform.cpp
+++ b/content/math/transforms/orTransform.cpp
@@ -1,8 +1,8 @@
void fft(vector<ll>& a, bool inv = false) {
- int n = sz(a);
+ int n = ssize(a);
for (int s = 1; s < n; s *= 2) {
for (int i = 0; i < n; i += 2 * s) {
for (int j = i; j < i + s; j++) {
ll& u = a[j], &v = a[j + s];
- tie(u, v) = inv ? pair(v, u - v) : pair(u + v, u);
+ tie(u, v) = inv ? pair(u, v - u) : pair(u, v + u);
}}}}
diff --git a/content/math/transforms/seriesOperations.cpp b/content/math/transforms/seriesOperations.cpp
index b405698..3d8aa11 100644
--- a/content/math/transforms/seriesOperations.cpp
+++ b/content/math/transforms/seriesOperations.cpp
@@ -17,7 +17,7 @@ vector<ll> poly_inv(const vector<ll>& a, int n) {
}
vector<ll> poly_deriv(vector<ll> a) {
- for (int i = 1; i < sz(a); i++)
+ for (int i = 1; i < ssize(a); i++)
a[i-1] = a[i] * i % mod;
a.pop_back();
return a;
@@ -25,11 +25,11 @@ vector<ll> poly_deriv(vector<ll> a) {
vector<ll> poly_integr(vector<ll> a) {
static vector<ll> inv = {0, 1};
- for (static int i = 2; i <= sz(a); i++)
+ for (static int i = 2; i <= ssize(a); i++)
inv.push_back(mod - mod / i * inv[mod % i] % mod);
a.push_back(0);
- for (int i = sz(a) - 1; i > 0; i--)
+ for (int i = ssize(a) - 1; i > 0; i--)
a[i] = a[i-1] * inv[i] % mod;
a[0] = 0;
return a;
@@ -46,7 +46,7 @@ vector<ll> poly_exp(vector<ll> a, int n) {
for (int len = 1; len < n; len *= 2) {
vector<ll> p = poly_log(q, 2*len);
for (int i = 0; i < 2*len; i++)
- p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod;
+ p[i] = (mod - p[i] + (i < ssize(a) ? a[i] : 0)) % mod;
vector<ll> q2 = q;
q2.resize(2*len);
ntt(p), ntt(q2);
diff --git a/content/math/transforms/xorTransform.cpp b/content/math/transforms/xorTransform.cpp
index f9d1d82..075aac3 100644
--- a/content/math/transforms/xorTransform.cpp
+++ b/content/math/transforms/xorTransform.cpp
@@ -1,5 +1,5 @@
void fft(vector<ll>& a, bool inv = false) {
- int n = sz(a);
+ int n = ssize(a);
for (int s = 1; s < n; s *= 2) {
for (int i = 0; i < n; i += 2 * s) {
for (int j = i; j < i + s; j++) {
diff --git a/content/other/fastIO.cpp b/content/other/fastIO.cpp
index 9badcc7..09473f4 100644
--- a/content/other/fastIO.cpp
+++ b/content/other/fastIO.cpp
@@ -16,7 +16,7 @@ void printPositive(int n) {
}
void fastprint(int n) {
- if(n == 0) {putchar('0'); return;}
+ if(n == 0) { putchar('0'); return; }
if (n < 0) {
putchar('-');
printPositive(-n);
diff --git a/content/other/fastSubsetSum.cpp b/content/other/fastSubsetSum.cpp
index 84396f6..38a84b6 100644
--- a/content/other/fastSubsetSum.cpp
+++ b/content/other/fastSubsetSum.cpp
@@ -1,11 +1,11 @@
int fastSubsetSum(vector<int> w, int t){
int a = 0, b = 0;
- while(b < sz(w) && a + w[b] <= t) a += w[b++];
- if(b == sz(w)) return a;
- int m = *max_element(all(w));
+ while(b < ssize(w) && a + w[b] <= t) a += w[b++];
+ if(b == ssize(w)) return a;
+ int m = *ranges::max_element(w);
vector<int> dp(2*m, -1), old;
dp[m+a-t] = b;
- for(int i = b; i < sz(w); i++){
+ for(int i = b; i < ssize(w); i++){
old = dp;
for(int j = 0; j < m; j++){
dp[j+w[i]] = max(dp[j+w[i]], old[j]);
@@ -18,4 +18,4 @@ int fastSubsetSum(vector<int> w, int t){
}
for(a = t; dp[m+a-t] < 0; a--);
return a;
-} \ No newline at end of file
+}
diff --git a/content/other/josephus2.cpp b/content/other/josephus2.cpp
index 33544ea..1c4295d 100644
--- a/content/other/josephus2.cpp
+++ b/content/other/josephus2.cpp
@@ -1,5 +1,5 @@
-int rotateLeft(int n) { // Der letzte Überlebende, 1-basiert.
+ll rotateLeft(ll n) { // Der letzte Überlebende, 0-basiert.
int bits = __lg(n);
- n ^= 1 << bits;
- return 2 * n + 1;
+ n ^= 1ll << bits;
+ return n << 1;
}
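A quick check of the new 0-based convention: for n = 6 = 110 in binary the last survivor is at position 100 = 4; rotateLeft(6) computes bits = 2, clears the top bit (6 ^ 4 = 2) and shifts left (2 << 1 = 4), which matches simulating the shooting of every second person by hand.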
diff --git a/content/other/other.tex b/content/other/other.tex
index 191a6da..8896962 100644
--- a/content/other/other.tex
+++ b/content/other/other.tex
@@ -18,9 +18,9 @@
\begin{expandtable}
\begin{tabularx}{\linewidth}{|lR|}
\hline
- Addition & \code{__builtin_saddll_overflow(a, b, &c)} \\
- Subtraktion & \code{__builtin_ssubll_overflow(a, b, &c)} \\
- Multiplikation & \code{__builtin_smulll_overflow(a, b, &c)} \\
+ Addition & \code{__builtin_saddll_overflow(a, b, \&c)} \\
+ Subtraktion & \code{__builtin_ssubll_overflow(a, b, \&c)} \\
+ Multiplikation & \code{__builtin_smulll_overflow(a, b, \&c)} \\
\hline
\end{tabularx}
\end{expandtable}
@@ -30,9 +30,9 @@
\begin{expandtable}
\begin{tabularx}{\linewidth}{|Ll|}
\hline
- Bit an Position j lesen & \code{(x & (1 << j)) != 0} \\
- Bit an Position j setzten & \code{x |= (1 << j)} \\
- Bit an Position j löschen & \code{x &= ~(1 << j)} \\
+ Bit an Position j lesen & \code{(x \& (1 << j)) != 0} \\
+ Bit an Position j setzen & \code{x |= (1 << j)} \\
+ Bit an Position j löschen & \code{x \&= ~(1 << j)} \\
Bit an Position j flippen & \code{x ^= (1 << j)} \\
Anzahl an führenden Nullen ($x \neq 0$) & \code{__builtin_clzll(x)} \\
Anzahl an schließenden Nullen ($x \neq 0$) & \code{__builtin_ctzll(x)} \\
@@ -67,9 +67,7 @@
\paragraph{Quadrangle inequality} Die Bedingung $\forall a\leq b\leq c\leq d:
C[a][d] + C[b][c] \geq C[a][c] + C[b][d]$ ist hinreichend für beide Optimierungen.
- \paragraph{Sum over Subsets DP} $\text{res}[\text{mask}]=\sum_{i\subseteq\text{mask}}\text{in}[i]$.
- Für Summe über Supersets \code{res} einmal vorher und einmal nachher reversen.
- \sourcecode{other/sos.cpp}
+ \paragraph{Sum over Subsets DP} Siehe \emph{or} Transform, Seite \pageref{fft}.
\end{algorithm}
\begin{algorithm}{Fast Subset Sum}
@@ -82,12 +80,12 @@
\sourcecode{other/pbs.cpp}
\end{algorithm}
+\columnbreak
\begin{algorithm}{Josephus-Problem}
$n$ Personen im Kreis, jeder $k$-te wird erschossen.
\begin{description}
\item[Spezialfall $\boldsymbol{k=2}$:] Betrachte $n$ binär.
- Für $n = 1b_1b_2b_3..b_n$ ist $b_1b_2b_3..b_n1$ die Position des letzten Überlebenden.
- (Rotiere $n$ um eine Stelle nach links)
+    Für $n = 1b_1b_2\ldots b_k$ (binär) ist $b_1b_2\ldots b_k0$ die 0-basierte Position des letzten Überlebenden.
\end{description}
\sourcecode{other/josephus2.cpp}
@@ -98,7 +96,6 @@
Also: $F(n,k) = (F(n-1,k)+k)\%n$. Basisfall: $F(1,k) = 0$.
\end{description}
\sourcecode{other/josephusK.cpp}
- \textbf{Beachte bei der Ausgabe, dass die Personen im ersten Fall von $\boldsymbol{1, \ldots, n}$ nummeriert sind, im zweiten Fall von $\boldsymbol{0, \ldots, n-1}$!}
\end{algorithm}
\begin{algorithm}[optional]{Zeileneingabe}
@@ -127,7 +124,7 @@
c'(u,v)&=c(u,v)-d(u,v)&c'(t,s)&=x
\end{align*}
Löse Fluss auf $G'$ mit \textsc{Dinic's Algorithmus}. Wenn alle Kanten von $s'$ saturiert sind, ist der Fluss in $G$ gültig. $x$ beschränkt den Fluss in $G$ (Binary-Search für minflow, $\infty$ sonst).
- \item \textbf{\textsc{Johnsons} Reweighting Algorithmus:}
+  \item \textbf{\textsc{Johnson}'s Reweighting Algorithm:}
Initialisiere alle Entfernungen mit \texttt{d[i] = 0}. Berechne mit \textsc{Bellman-Ford} kürzeste Entfernungen.
Falls es einen negativen Zyklus gibt, abbrechen.
Sonst ändere die Gewichte von allen Kanten \texttt{(u,v)} im ursprünglichen Graphen zu \texttt{d[u]+w[u,v]-d[v]}.
@@ -186,8 +183,8 @@
[X/G] = \frac{1}{\vert G \vert} \sum_{g \in G} m^{\#(g)}
\]
- \item \textbf{Verteilung von Primzahlen:}
- Für alle $n \in \mathbb{N}$ gilt: Ex existiert eine Primzahl $p$ mit $n \leq p \leq 2n$.
+ \item \textbf{\textsc{Bertrand}sches Postulat:}
+    Für alle $n \in \mathbb{N}$ gilt: Es existiert eine Primzahl $p$ mit $n < p \leq 2n$.
\item \textbf{Satz von \textsc{Kirchhoff}:}
Sei $G$ ein zusammenhängender, ungerichteter Graph evtl. mit Mehrfachkanten.
@@ -199,7 +196,7 @@
\newline
Entferne letzte Zeile und Spalte und berechne Betrag der Determinante.
- \item \textbf{\textsc{Dilworths}-Theorem:}
+ \item \textbf{\textsc{Dilworth}'s Theorem:}
Sei $S$ eine Menge und $\leq$ eine partielle Ordnung ($S$ ist ein Poset).
Eine \emph{Kette} ist eine Teilmenge $\{x_1,\ldots,x_n\}$ mit $x_1 \leq \ldots \leq x_n$.
Eine \emph{Partition} ist eine Menge von Ketten, sodass jedes $s \in S$ in genau einer Kette ist.
@@ -211,15 +208,15 @@
Berechnung: Maximales Matching in bipartitem Graphen.
Dupliziere jedes $s \in S$ in $u_s$ und $v_s$.
Falls $x \leq y$, füge Kante $u_x \to v_y$ hinzu.
- Wenn Matching zu langsam ist, versuche Struktur des Posets auszunutzen und evtl. anders eine maximale Anitkette zu finden.
+ Wenn Matching zu langsam ist, versuche Struktur des Posets auszunutzen und evtl. anders eine maximale Antikette zu finden.
- \item \textbf{\textsc{Turan}'s-Theorem:}
+ \item \textbf{\textsc{Tur\'an}'s Theorem:}
Die maximale Anzahl an Kanten in einem Graphen mit $n$ Knoten, der keine Clique der Größe $x+1$ enthält, ist:
\begin{align*}
ext(n, K_{x+1}) &= \binom{n}{2} - \left[\left(x - (n \bmod x)\right) \cdot \binom{\floor{\frac{n}{x}}}{2} + \left(n\bmod x\right) \cdot \binom{\ceil{\frac{n}{x}}}{2}\right]
\end{align*}
- \item \textbf{\textsc{Euler}'s-Polyedersatz:}
+ \item \textbf{\textsc{Euler}scher Polyedersatz:}
In planaren Graphen gilt $n-m+f-c=1$ ($n$ Knoten, $m$ Kanten, $f$ Flächen, $c$ Zusammenhangskomponenten).
\item \textbf{\textsc{Pythagoreische Tripel}:}
diff --git a/content/other/pbs.cpp b/content/other/pbs.cpp
index f4db2fd..e6bfeac 100644
--- a/content/other/pbs.cpp
+++ b/content/other/pbs.cpp
@@ -7,7 +7,7 @@ while (true) {
focus.emplace_back((low[i] + high[i]) / 2, i);
}}
if (focus.empty()) break;
- sort(all(focus));
+ ranges::sort(focus);
// reset simulation
for (int step = 0; auto [mid, i] : focus) {
diff --git a/content/other/sos.cpp b/content/other/sos.cpp
deleted file mode 100644
index 01bc44c..0000000
--- a/content/other/sos.cpp
+++ /dev/null
@@ -1,6 +0,0 @@
-vector<ll> res(in);
-for (int i = 1; i < sz(res); i *= 2) {
- for (int mask = 0; mask < sz(res); mask++){
- if (mask & i) {
- res[mask] += res[mask ^ i];
-}}}
diff --git a/content/other/timed.cpp b/content/other/timed.cpp
index b3ed4ef..a3ede29 100644
--- a/content/other/timed.cpp
+++ b/content/other/timed.cpp
@@ -1,3 +1,3 @@
int times = clock();
//run for 900ms
-while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) {...}
+while (1000*(clock()-times)/CLOCKS_PER_SEC < 900) { ... }
diff --git a/content/string/ahoCorasick.cpp b/content/string/ahoCorasick.cpp
index 390d16d..d738961 100644
--- a/content/string/ahoCorasick.cpp
+++ b/content/string/ahoCorasick.cpp
@@ -4,7 +4,8 @@ struct AhoCorasick {
int suffix = 0, ch, cnt = 0;
array<int, ALPHABET_SIZE> nxt = {};
- vert(int p, int c) : suffix(-p), ch(c) {fill(all(nxt), -1);}
+ vert(int p, int c):
+ suffix(-p), ch(c) { ranges::fill(nxt, -1); }
};
vector<vert> aho = {{0, -1}};
@@ -13,7 +14,7 @@ struct AhoCorasick {
for (auto c : s) {
int idx = c - OFFSET;
if (aho[v].nxt[idx] == -1) {
- aho[v].nxt[idx] = sz(aho);
+ aho[v].nxt[idx] = ssize(aho);
aho.emplace_back(v, idx);
}
v = aho[v].nxt[idx];
@@ -37,9 +38,9 @@ struct AhoCorasick {
vector<vector<int>> adj;
vector<ll> dp;
void buildGraph() {
- adj.resize(sz(aho));
- dp.assign(sz(aho), 0);
- for (int i = 1; i < sz(aho); i++) {
+ adj.resize(ssize(aho));
+ dp.assign(ssize(aho), 0);
+ for (int i = 1; i < ssize(aho); i++) {
adj[getSuffix(i)].push_back(i);
}}
diff --git a/content/string/deBruijn.cpp b/content/string/deBruijn.cpp
index e829137..545dde7 100644
--- a/content/string/deBruijn.cpp
+++ b/content/string/deBruijn.cpp
@@ -1,7 +1,7 @@
string deBruijn(int n, char mi = '0', char ma = '1') {
string res, c(1, mi);
do {
- if (n % sz(c) == 0) res += c;
+ if (n % ssize(c) == 0) res += c;
} while(next(c, n, mi, ma));
return res;
}
diff --git a/content/string/duval.cpp b/content/string/duval.cpp
index 253bae1..de94ebd 100644
--- a/content/string/duval.cpp
+++ b/content/string/duval.cpp
@@ -1,8 +1,8 @@
vector<pair<int, int>> duval(const string& s) {
vector<pair<int, int>> res;
- for (int i = 0; i < sz(s);) {
+ for (int i = 0; i < ssize(s);) {
int j = i + 1, k = i;
- for (; j < sz(s) && s[k] <= s[j]; j++) {
+ for (; j < ssize(s) && s[k] <= s[j]; j++) {
if (s[k] < s[j]) k = i;
else k++;
}
@@ -15,5 +15,5 @@ vector<pair<int, int>> duval(const string& s) {
int minrotation(const string& s) {
auto parts = duval(s+s);
for (auto [l, r] : parts) {
- if (r >= sz(s)) return l;
+ if (r >= ssize(s)) return l;
}}
diff --git a/content/string/kmp.cpp b/content/string/kmp.cpp
index 421479e..a354aa7 100644
--- a/content/string/kmp.cpp
+++ b/content/string/kmp.cpp
@@ -1,7 +1,7 @@
vector<int> kmpPreprocessing(const string& sub) {
- vector<int> b(sz(sub) + 1);
+ vector<int> b(ssize(sub) + 1);
b[0] = -1;
- for (int i = 0, j = -1; i < sz(sub);) {
+ for (int i = 0, j = -1; i < ssize(sub);) {
while (j >= 0 && sub[i] != sub[j]) j = b[j];
b[++i] = ++j;
}
@@ -9,10 +9,10 @@ vector<int> kmpPreprocessing(const string& sub) {
}
vector<int> kmpSearch(const string& s, const string& sub) {
vector<int> result, pre = kmpPreprocessing(sub);
- for (int i = 0, j = 0; i < sz(s);) {
+ for (int i = 0, j = 0; i < ssize(s);) {
while (j >= 0 && s[i] != sub[j]) j = pre[j];
i++; j++;
- if (j == sz(sub)) {
+ if (j == ssize(sub)) {
result.push_back(i - j);
j = pre[j];
}}
diff --git a/content/string/longestCommonSubsequence.cpp b/content/string/longestCommonSubsequence.cpp
index 6c9ea44..14ca62c 100644
--- a/content/string/longestCommonSubsequence.cpp
+++ b/content/string/longestCommonSubsequence.cpp
@@ -1,12 +1,12 @@
string lcss(const string& a, const string& b) {
- vector<vector<int>> m(sz(a) + 1, vector<int>(sz(b) + 1));
- for (int i = sz(a) - 1; i >= 0; i--) {
- for (int j = sz(b) - 1; j >= 0; j--) {
+ vector<vector<int>> m(ssize(a) + 1, vector<int>(ssize(b) + 1));
+ for (int i = ssize(a) - 1; i >= 0; i--) {
+ for (int j = ssize(b) - 1; j >= 0; j--) {
if (a[i] == b[j]) m[i][j] = 1 + m[i+1][j+1];
else m[i][j] = max(m[i+1][j], m[i][j+1]);
}} // Für die Länge: return m[0][0];
string res;
- for (int j = 0, i = 0; j < sz(b) && i < sz(a);) {
+ for (int j = 0, i = 0; j < ssize(b) && i < ssize(a);) {
if (a[i] == b[j]) res += a[i++], j++;
else if (m[i][j+1] > m[i+1][j]) j++;
else i++;
diff --git a/content/string/lyndon.cpp b/content/string/lyndon.cpp
index e44379b..cb477d4 100644
--- a/content/string/lyndon.cpp
+++ b/content/string/lyndon.cpp
@@ -1,5 +1,5 @@
bool next(string& s, int maxLen, char mi = '0', char ma = '1') {
- for (int i = sz(s), j = sz(s); i < maxLen; i++)
+ for (int i = ssize(s), j = ssize(s); i < maxLen; i++)
s.push_back(s[i % j]);
while(!s.empty() && s.back() == ma) s.pop_back();
if (s.empty()) {
diff --git a/content/string/manacher.cpp b/content/string/manacher.cpp
index 112bd55..9fa2991 100644
--- a/content/string/manacher.cpp
+++ b/content/string/manacher.cpp
@@ -1,9 +1,9 @@
vector<int> manacher(const string& t) {
//transforms "aa" to ".a.a." to find even length palindromes
- string s(sz(t) * 2 + 1, '.');
- for (int i = 0; i < sz(t); i++) s[2 * i + 1] = t[i];
+ string s(ssize(t) * 2 + 1, '.');
+ for (int i = 0; i < ssize(t); i++) s[2 * i + 1] = t[i];
- int mid = 0, r = 0, n = sz(s);
+ int mid = 0, r = 0, n = ssize(s);
vector<int> pal(n);
for (int i = 1; i < n - 1; i++) {
if (r > i) pal[i] = min(r - i, pal[2 * mid - i]);
diff --git a/content/string/rollingHash.cpp b/content/string/rollingHash.cpp
index 6e914aa..1157cb7 100644
--- a/content/string/rollingHash.cpp
+++ b/content/string/rollingHash.cpp
@@ -14,5 +14,5 @@ struct Hash {
return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
}
- static ll mul(__int128 a, ll b) {return a * b % M;}
+ static ll mul(__int128 a, ll b) { return a * b % M; }
};
diff --git a/content/string/rollingHashCf.cpp b/content/string/rollingHashCf.cpp
index 84b2e4e..c08a9d3 100644
--- a/content/string/rollingHashCf.cpp
+++ b/content/string/rollingHashCf.cpp
@@ -13,5 +13,5 @@ struct Hash {
return (pref[r] - mul(power[r-l], pref[l]) + M) % M;
}
- static ll mul(__int128 a, ll b) {return a * b % M;}
+ static ll mul(__int128 a, ll b) { return a * b % M; }
};
diff --git a/content/string/string.tex b/content/string/string.tex
index bedabfb..0e482bf 100644
--- a/content/string/string.tex
+++ b/content/string/string.tex
@@ -63,21 +63,21 @@
\end{algorithm}
\clearpage
-\begin{algorithm}{Lyndon und De-Bruijn}
+\begin{algorithm}{\textsc{Lyndon} und \textsc{De-Bruijn}}
\begin{itemize}
- \item \textbf{Lyndon-Wort:} Ein Wort das lexikographisch kleiner ist als jede seiner Rotationen.
- \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von Lyndon-Worten zerlegt werden.
- \item Für Lyndon-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein Lyndon-Wort ist.
+	\item \textbf{\textsc{Lyndon}-Wort:} Ein Wort, das lexikographisch kleiner ist als jede seiner Rotationen.
+ \item Jedes Wort kann \emph{eindeutig} in eine nicht ansteigende Folge von \textsc{Lyndon}-Worten zerlegt werden.
+ \item Für \textsc{Lyndon}-Worte $u, v$ mit $u<v$ gilt, dass $uv$ auch ein \textsc{Lyndon}-Wort ist.
\end{itemize}
\begin{methods}
- \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes Lyndon-Wort}{n}
- \method{duval}{zerlegt $s$ in Lyndon-Worte}{n}
+ \method[, Durchschnitt $\Theta(1)$]{next}{lexikographisch nächstes \textsc{Lyndon}-Wort}{n}
+ \method{duval}{zerlegt $s$ in \textsc{Lyndon}-Worte}{n}
\method{minrotation}{berechnet kleinste Rotation von $s$}{n}
\end{methods}
\sourcecode{string/lyndon.cpp}
\sourcecode{string/duval.cpp}
\begin{itemize}
- \item \textbf{De-Bruijn-Sequenze $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort das jedes Wort der Länge $n$ genau einmal als substring enthält (und minimal ist). Wobei $B(\Sigma, n)$ zyklisch betrachtet wird.
+	\item \textbf{\textsc{De-Bruijn}-Sequenz $\boldsymbol{B(\Sigma, n)}$:}~~~ein Wort, das jedes Wort der Länge $n$ genau einmal als Substring enthält (und minimal ist), wobei $B(\Sigma, n)$ zyklisch betrachtet wird.
\item es gibt $\frac{(k!)^{k^{n-1}}}{k^{n}}$ verschiedene $B(\Sigma, n)$
\item $B(\Sigma, n)$ hat Länge $\abs{\Sigma}^n$
\end{itemize}
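Small worked examples: \code{duval} factors \code{banana} as \code{b|an|an|a}, \code{minrotation} of \code{banana} returns $5$ (the rotation \code{abanan}), and \code{deBruijn(3)} produces \code{00010111}, which contains every binary word of length $3$ exactly once when read cyclically.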
diff --git a/content/string/suffixArray.cpp b/content/string/suffixArray.cpp
index 8b698d2..65bbb38 100644
--- a/content/string/suffixArray.cpp
+++ b/content/string/suffixArray.cpp
@@ -4,22 +4,22 @@ struct SuffixArray {
vector<int> SA, LCP;
vector<vector<int>> P;
- SuffixArray(const string& s) : n(sz(s)), SA(n), LCP(n),
+ SuffixArray(const string& s) : n(ssize(s)), SA(n), LCP(n),
P(__lg(2 * n - 1) + 1, vector<int>(n)) {
- P[0].assign(all(s));
- iota(all(SA), 0);
- sort(all(SA), [&](int a, int b) {return s[a] < s[b];});
+ P[0].assign(begin(s), end(s));
+ iota(begin(SA), end(SA), 0);
+ ranges::sort(SA, {}, [&](int x) { return s[x]; });
vector<int> x(n);
for (int k = 1, c = 1; c < n; k++, c *= 2) {
- iota(all(x), n - c);
+ iota(begin(x), end(x), n - c);
for (int ptr = c; int i : SA) if (i >= c) x[ptr++] = i - c;
vector<int> cnt(k == 1 ? MAX_CHAR : n);
for (int i : P[k-1]) cnt[i]++;
- partial_sum(all(cnt), begin(cnt));
+ partial_sum(begin(cnt), end(cnt), begin(cnt));
for (int i : x | views::reverse) SA[--cnt[P[k-1][i]]] = i;
- auto p = [&](int i) {return i < n ? P[k-1][i] : -1;};
+ auto p = [&](int i) { return i < n ? P[k-1][i] : -1; };
for (int i = 1; i < n; i++) {
int a = SA[i-1], b = SA[i];
P[k][b] = P[k][a] + (p(a) != p(b) || p(a+c) != p(b+c));
@@ -27,10 +27,11 @@ struct SuffixArray {
for (int i = 1; i < n; i++) LCP[i] = lcp(SA[i-1], SA[i]);
}
- int lcp(int x, int y) {//x & y are text-indices, not SA-indices
+ // x & y are text-indices, not SA-indices
+ int lcp(int x, int y) {
if (x == y) return n - x;
int res = 0;
- for (int i = sz(P) - 1; i >= 0 && max(x, y) + res < n; i--) {
+ for (int i = ssize(P)-1; i >= 0 && max(x, y) + res < n; i--){
if (P[i][x + res] == P[i][y + res]) res |= 1 << i;
}
return res;
diff --git a/content/string/suffixAutomaton.cpp b/content/string/suffixAutomaton.cpp
index 9a68cb3..f9aa80b 100644
--- a/content/string/suffixAutomaton.cpp
+++ b/content/string/suffixAutomaton.cpp
@@ -4,20 +4,20 @@ struct SuffixAutomaton {
struct State {
int len, link = -1;
array<int, ALPHABET_SIZE> nxt; // map if large Alphabet
- State(int l) : len(l) {fill(all(nxt), -1);}
+ State(int l): len(l) { ranges::fill(nxt, -1); }
};
vector<State> st = {State(0)};
int cur = 0;
SuffixAutomaton(const string& s) {
- st.reserve(2 * sz(s));
+ st.reserve(2 * ssize(s));
for (auto c : s) extend(c - OFFSET);
}
void extend(int c) {
int p = cur;
- cur = sz(st);
+ cur = ssize(st);
st.emplace_back(st[p].len + 1);
for (; p != -1 && st[p].nxt[c] < 0; p = st[p].link) {
st[p].nxt[c] = cur;
@@ -33,9 +33,9 @@ struct SuffixAutomaton {
st.back().link = st[q].link;
st.back().nxt = st[q].nxt;
for (; p != -1 && st[p].nxt[c] == q; p = st[p].link) {
- st[p].nxt[c] = sz(st) - 1;
+ st[p].nxt[c] = ssize(st) - 1;
}
- st[q].link = st[cur].link = sz(st) - 1;
+ st[q].link = st[cur].link = ssize(st) - 1;
}}}
vector<int> calculateTerminals() {
@@ -49,7 +49,7 @@ struct SuffixAutomaton {
// Pair with start index (in t) and length of LCS.
pair<int, int> longestCommonSubstring(const string& t) {
int v = 0, l = 0, best = 0, bestp = -1;
- for (int i = 0; i < sz(t); i++) {
+ for (int i = 0; i < ssize(t); i++) {
int c = t[i] - OFFSET;
while (v > 0 && st[v].nxt[c] < 0) {
v = st[v].link;
diff --git a/content/string/suffixTree.cpp b/content/string/suffixTree.cpp
index 7112f39..6362c3e 100644
--- a/content/string/suffixTree.cpp
+++ b/content/string/suffixTree.cpp
@@ -11,12 +11,12 @@ struct SuffixTree {
SuffixTree(const string& s_) : s(s_) {
needsSuffix = remainder = curVert = curEdge = curLen = 0;
pos = -1;
- for (int i = 0; i < sz(s); i++) extend();
+ for (int i = 0; i < ssize(s); i++) extend();
}
int newVert(int start, int end) {
tree.push_back({start, end, 0, {}});
- return sz(tree) - 1;
+ return ssize(tree) - 1;
}
void addSuffixLink(int vert) {
@@ -42,7 +42,7 @@ struct SuffixTree {
while (remainder) {
if (curLen == 0) curEdge = pos;
if (!tree[curVert].nxt.count(s[curEdge])) {
- int leaf = newVert(pos, sz(s));
+ int leaf = newVert(pos, ssize(s));
tree[curVert].nxt[s[curEdge]] = leaf;
addSuffixLink(curVert);
} else {
@@ -56,7 +56,7 @@ struct SuffixTree {
int split = newVert(tree[nxt].start,
tree[nxt].start + curLen);
tree[curVert].nxt[s[curEdge]] = split;
- int leaf = newVert(pos, sz(s));
+ int leaf = newVert(pos, ssize(s));
tree[split].nxt[s[pos]] = leaf;
tree[nxt].start += curLen;
tree[split].nxt[s[tree[nxt].start]] = nxt;
@@ -69,4 +69,4 @@ struct SuffixTree {
} else {
curVert = tree[curVert].suf ? tree[curVert].suf : 0;
}}}
-}; \ No newline at end of file
+};
diff --git a/content/string/trie.cpp b/content/string/trie.cpp
index 03cf947..db39c43 100644
--- a/content/string/trie.cpp
+++ b/content/string/trie.cpp
@@ -3,7 +3,7 @@ constexpr int ALPHABET_SIZE = 2;
struct node {
int words, ends;
array<int, ALPHABET_SIZE> nxt;
- node() : words(0), ends(0) {fill(all(nxt), -1);}
+ node(): words(0), ends(0) { ranges::fill(nxt, -1); }
};
vector<node> trie = {node()};
@@ -13,7 +13,7 @@ int traverse(const vector<int>& word, int x) {
if (id < 0 || (trie[id].words == 0 && x <= 0)) return -1;
trie[id].words += x;
if (trie[id].nxt[c] < 0 && x > 0) {
- trie[id].nxt[c] = sz(trie);
+ trie[id].nxt[c] = ssize(trie);
trie.emplace_back();
}
id = trie[id].nxt[c];
diff --git a/content/string/z.cpp b/content/string/z.cpp
index 069fa38..0d8cafb 100644
--- a/content/string/z.cpp
+++ b/content/string/z.cpp
@@ -1,5 +1,5 @@
vector<int> Z(const string& s) {
- int n = sz(s);
+ int n = ssize(s);
vector<int> z(n);
for (int i = 1, x = 0; i < n; i++) {
z[i] = max(0, min(z[i - x], x + z[x] - i));
diff --git a/content/tcr.tex b/content/tcr.tex
index 6d849d5..46a9a6a 100644
--- a/content/tcr.tex
+++ b/content/tcr.tex
@@ -6,12 +6,14 @@
]{scrartcl}
% General information.
-\newcommand{\teamname}{Kindergarten Timelimit}
+\newcommand{\teamname}{Infinite Loopers}
\newcommand{\university}{Karlsruhe Institute of Technology}
% Options
\newif\ifoptional
-%\optionaltrue
+\ifdefined\OPTIONAL
+ \optionaltrue
+\fi
% Font encoding.
\usepackage[T1]{fontenc}
@@ -44,6 +46,7 @@
% Content.
\begin{multicols*}{3}
+ \raggedcolumns
\input{datastructures/datastructures}
\input{graph/graph}
\input{geometry/geometry}
@@ -54,12 +57,6 @@
\input{other/other}
\input{template/template}
\clearpage
- \ifodd\value{page}
- \else
- \null
- \thispagestyle{empty}
- \clearpage
- \fi
\input{tests/test}
\end{multicols*}
\end{document}
diff --git a/content/template/template.cpp b/content/template/template.cpp
index 7430d23..7c92f09 100644
--- a/content/template/template.cpp
+++ b/content/template/template.cpp
@@ -1,17 +1,15 @@
#include <bits/stdc++.h>
using namespace std;
-#define tsolve int t; cin >> t; while(t--) solve
-#define all(x) ::begin(x), ::end(x)
-#define sz(x) (ll)::size(x)
-
+using ii = pair<int, int>;
+using vi = vector<int>;
using ll = long long;
using ld = long double;
-void solve() {}
+void solve() {
+}
int main() {
- cin.tie(0)->sync_with_stdio(false);
- cout << setprecision(16);
+ cin.tie(0)->sync_with_stdio(0);
solve();
}
diff --git a/tcr.pdf b/tcr.pdf
deleted file mode 100644
index 76c898a..0000000
--- a/tcr.pdf
+++ /dev/null
Binary files differ
diff --git a/test/GNUmakefile b/test/GNUmakefile
new file mode 100644
index 0000000..5e57930
--- /dev/null
+++ b/test/GNUmakefile
@@ -0,0 +1,36 @@
+
+TESTS = $(basename $(shell find . -path ./awk -prune -o -type f -name '*.cpp' -print))
+AWK = $(basename $(shell find . -type f -name '*.awk'))
+CXX = g++ -std=gnu++20 -I awk/ -I ../content/ -O2 -Wall -Wextra -Wshadow -Werror
+
+test: $(TESTS:=.ok)
+
+missing:
+ @find ../content -name '*.cpp' | sed 's|^../content/||' \
+ | while read -r f ; do [ -e "$$f" ] || echo "$$f" ; done \
+ | sort > missing.tmp
+ @sort missing.ignore | comm -3 missing.tmp -
+ @rm missing.tmp
+
+clean:
+ rm -f $(TESTS:=.test) $(TESTS:=.ok) $(TESTS:=.d)
+ rm -rf awk/
+
+%.ok: %.test
+ timeout --foreground --verbose 60 prlimit -s$$((1<<32)) ./$<
+ @touch $@
+
+%.test: %.cpp
+ $(CXX) -o $@ $<
+
+awk/%: %.awk ../content/%
+ @mkdir -p $(dir $@)
+ awk -f $*.awk < ../content/$* > $@
+
+%.d: %.cpp $(addprefix awk/,$(AWK))
+ $(CXX) -M -MT '$*.test $*.d' -MF $@ $<
+
+.PHONY: test clean
+.SECONDARY: $(TESTS:=.test) $(addprefix awk/,$(AWK))
+
+include $(TESTS:=.d)
diff --git a/test/datastructures/LCT.cpp b/test/datastructures/LCT.cpp
index 58d76d7..68a952c 100644
--- a/test/datastructures/LCT.cpp
+++ b/test/datastructures/LCT.cpp
@@ -73,13 +73,13 @@ struct Naive {
}
};
dfs_comp(dfs_comp, x);
- return seen[Random::integer<int>(sz(seen))];
+ return seen[Random::integer<int>(ssize(seen))];
}
int randomAdj(int x) {
if (adj[x].empty()) return -1;
- vector<int> seen(all(adj[x]));
- return seen[Random::integer<int>(sz(seen))];
+ vector<int> seen(begin(adj[x]), end(adj[x]));
+ return seen[Random::integer<int>(ssize(seen))];
}
};
@@ -179,7 +179,7 @@ void performance_test() {
int a = Random::integer<int>(0, N);
int b = Random::integer<int>(0, N);
ll w = Random::integer<ll>(-1000, 1000);
-
+
t.start();
if (!lct.connected(&lct.nodes[a], &lct.nodes[b])) {
lct.link(&lct.nodes[a], &lct.nodes[b]);
diff --git a/test/datastructures/dynamicConvexHull.lichao.cpp b/test/datastructures/dynamicConvexHull.lichao.cpp
index d50ca60..9a6ffb9 100644
--- a/test/datastructures/dynamicConvexHull.lichao.cpp
+++ b/test/datastructures/dynamicConvexHull.lichao.cpp
@@ -8,7 +8,7 @@ void stress_test(ll range) {
for (int tries = 0; tries < 1000; tries++) {
int n = Random::integer<int>(1, 100);
xs = Random::distinct(n, -range, range);
- sort(all(xs));
+ ranges::sort(xs);
HullDynamic hd;
Lichao lichao;
diff --git a/test/datastructures/fenwickTree.cpp b/test/datastructures/fenwickTree.cpp
index c1ef6bf..f2a490a 100644
--- a/test/datastructures/fenwickTree.cpp
+++ b/test/datastructures/fenwickTree.cpp
@@ -23,7 +23,7 @@ void stress_test() {
int i = Random::integer<int>(0, n);
ll got = prefix_sum(i);
ll expected = 0;
- for (int j = 0; j <= i; j++) expected += naive[j];
+ for (int j = 0; j < i; j++) expected += naive[j];
if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
}
}
@@ -42,7 +42,7 @@ void performance_test() {
int i = Random::integer<int>(0, N);
int j = Random::integer<int>(0, N);
ll x = Random::integer<ll>(-1000, 1000);
-
+
t.start();
update(i, x);
hash ^= prefix_sum(j);
diff --git a/test/datastructures/fenwickTree2.cpp b/test/datastructures/fenwickTree2.cpp
index 89d5b0f..bc0753f 100644
--- a/test/datastructures/fenwickTree2.cpp
+++ b/test/datastructures/fenwickTree2.cpp
@@ -23,7 +23,7 @@ void stress_test() {
int i = Random::integer<int>(0, n);
ll got = prefix_sum(i);
ll expected = 0;
- for (int j = 0; j <= i; j++) expected += naive[j];
+ for (int j = 0; j < i; j++) expected += naive[j];
if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
}
}
@@ -44,7 +44,7 @@ void performance_test() {
int j = Random::integer<int>(0, N);
int k = Random::integer<int>(0, N);
ll x = Random::integer<ll>(-1000, 1000);
-
+
t.start();
update(i, j, x);
hash ^= prefix_sum(k);
diff --git a/test/datastructures/lazyPropagation.cpp b/test/datastructures/lazyPropagation.cpp
index feb07f0..16db945 100644
--- a/test/datastructures/lazyPropagation.cpp
+++ b/test/datastructures/lazyPropagation.cpp
@@ -34,6 +34,39 @@ void stress_test() {
cerr << "tested random queries: " << queries << endl;
}
+void stress_test_binary_search() {
+ ll queries = 0;
+ for (int tries = 0; tries < 100; tries++) {
+ int n = Random::integer<int>(10, 100);
+ vector<ll> naive = Random::integers<ll>(n, 0, 1000);
+ SegTree tree(naive);
+ for (int operations = 0; operations < 1000; operations++) {
+ {
+ int l = Random::integer<int>(0, n + 1);
+ int r = Random::integer<int>(0, n + 1);
+ //if (l > r) swap(l, r);
+ ll x = Random::integer<ll>(0, 1000);
+ tree.update(l, r, x);
+ for (int j = l; j < r; j++) naive[j] = x;
+ }
+ {
+ queries++;
+ int l = Random::integer<int>(0, n + 1);
+ int r = Random::integer<int>(0, n + 1);
+ ll x = Random::integer<ll>(0, 10000);
+ //if (l > r) swap(l, r);
+ int got = tree.binary_search(l, r, [x](ll v) { return v >= x; });
+ ll sum;
+ int j;
+ for (j = l, sum = 0; j < r && sum < x; j++) sum += naive[j];
+ int expected = sum >= x ? j : -1;
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ }
+ }
+ }
+ cerr << "tested random binary searches: " << queries << endl;
+}
+
void performance_test() {
timer t;
t.start();
@@ -45,7 +78,7 @@ void performance_test() {
auto [l1, r1] = Random::pair<int>(0, N + 1);
auto [l2, r2] = Random::pair<int>(0, N + 1);
ll x1 = Random::integer<ll>(-1000, 1000);
-
+
t.start();
tree.update(l1, r1, x1);
hash ^= tree.query(l2, r2);
@@ -55,7 +88,31 @@ void performance_test() {
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
+void performance_test_binary_search() {
+ timer t;
+ t.start();
+ vector<ll> tmp(N);
+ SegTree tree(tmp);
+ t.stop();
+ hash_t hash = 0;
+ for (int operations = 0; operations < N; operations++) {
+ auto [l1, r1] = Random::pair<int>(0, N + 1);
+ auto [l2, r2] = Random::pair<int>(0, N + 1);
+ ll x1 = Random::integer<ll>(0, 1000);
+ ll x2 = Random::integer<ll>(0, 1000 * N);
+
+ t.start();
+ tree.update(l1, r1, x1);
+ hash ^= tree.binary_search(l2, r2, [x2](ll v) { return v >= x2; });
+ t.stop();
+ }
+ if (t.time > 2000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
int main() {
stress_test();
+ stress_test_binary_search();
performance_test();
+ performance_test_binary_search();
}
diff --git a/test/datastructures/lichao.cpp b/test/datastructures/lichao.cpp
index f4b797b..1639b3d 100644
--- a/test/datastructures/lichao.cpp
+++ b/test/datastructures/lichao.cpp
@@ -7,7 +7,7 @@ void stress_test(ll range) {
for (int tries = 0; tries < 1000; tries++) {
int n = Random::integer<int>(1, 100);
xs = Random::distinct<ll>(n, -range, range);
- sort(all(xs));
+ ranges::sort(xs);
vector<ll> naive(n, INF);
Lichao tree;
@@ -42,7 +42,7 @@ constexpr int N = 200'000;
void performance_test() {
timer t;
xs = Random::distinct<ll>(N, -1'000'000'000, 1'000'000'000);
- sort(all(xs));
+ ranges::sort(xs);
t.start();
Lichao tree;
diff --git a/test/datastructures/monotonicConvexHull.cpp b/test/datastructures/monotonicConvexHull.cpp
index 0415068..98d74f8 100644
--- a/test/datastructures/monotonicConvexHull.cpp
+++ b/test/datastructures/monotonicConvexHull.cpp
@@ -1,7 +1,5 @@
#include "../util.h"
-struct MCH {
- #include <datastructures/monotonicConvexHull.cpp>
-};
+#include <datastructures/monotonicConvexHull.cpp>
struct Line {
ll m, c;
@@ -14,12 +12,12 @@ void stress_test(ll range) {
for (int tries = 0; tries < 1000; tries++) {
int n = Random::integer<int>(1, 100);
auto ms = Random::integers<ll>(n, -range, range);
- sort(all(ms), greater<>{});
+ ranges::sort(ms | views::reverse);
auto cs = ms;
for (int l = 0, r = 0; l < n;) {
while (r < n && ms[l] == ms[r]) r++;
auto tmp = Random::distinct<ll>(r - l, -range, range);
- sort(all(tmp), greater<>{});
+ ranges::sort(tmp | views::reverse);
for (int c : tmp) {
cs[l] = c;
l++;
@@ -27,12 +25,12 @@ void stress_test(ll range) {
}
auto xs = Random::integers<ll>(n*100, -range*n, range*n);
- sort(all(xs));
+ ranges::sort(xs);
int i = 0;
vector<Line> naive;
- MCH mch;
+ Envelope mch;
for (int k = 0; k < n; k++) {
ll m = ms[k];
ll c = cs[k];
@@ -60,12 +58,12 @@ void stress_test_independent(ll range) {
for (int tries = 0; tries < 1000; tries++) {
int n = Random::integer<int>(1, 100);
auto ms = Random::integers<ll>(n, -range, range);
- sort(all(ms), greater<>{});
+ ranges::sort(ms | views::reverse);
auto cs = ms;
for (int l = 0, r = 0; l < n;) {
while (r < n && ms[l] == ms[r]) r++;
auto tmp = Random::distinct<ll>(r - l, -range, range);
- sort(all(tmp), greater<>{});
+ ranges::sort(tmp | views::reverse);
for (int c : tmp) {
cs[l] = c;
l++;
@@ -74,7 +72,7 @@ void stress_test_independent(ll range) {
vector<Line> naive;
- MCH mch;
+ Envelope mch;
for (int i = 0; i < n; i++) {
ll m = ms[i];
ll c = cs[i];
@@ -83,7 +81,7 @@ void stress_test_independent(ll range) {
naive.emplace_back(m, c);
auto xs = Random::integers<ll>(100, -range, range);
- sort(all(xs));
+ ranges::sort(xs);
auto tmp = mch;
for (auto x : xs) {
@@ -103,17 +101,17 @@ constexpr int N = 1'000'000;
void performance_test() {
timer t;
auto ms = Random::distinct<ll>(N, -1'000'000'000, 1'000'000'000);
- sort(all(ms), greater<>{});
+ ranges::sort(ms | views::reverse);
auto xs = Random::distinct<ll>(N, -1'000'000'000, 1'000'000'000);
- sort(all(xs));
- MCH mch;
+ ranges::sort(xs);
+ Envelope mch;
hash_t hash = 0;
for (int operations = 0; operations < N; operations++) {
ll c = Random::integer<ll>(-1'000'000'000, 1'000'000'000);
ll m = ms[operations];
ll x = xs[operations];
-
+
t.start();
mch.add(m, c);
hash += mch.query(x);
diff --git a/test/datastructures/pbds.cpp b/test/datastructures/pbds.cpp
deleted file mode 100644
index 9080332..0000000
--- a/test/datastructures/pbds.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-#include "../util.h"
-#include <datastructures/pbds.cpp>
-
-int main() {
- Tree<int> t1, t2;
- swap(t1, t2);
- hashSet<int> s1, s2;
- swap(s1, s2);
- hashMap<int, int> m1, m2;
- swap(m1, m2);
-} \ No newline at end of file
diff --git a/test/datastructures/persistentArray.cpp b/test/datastructures/persistentArray.cpp
index 6712089..ef8e52b 100644
--- a/test/datastructures/persistentArray.cpp
+++ b/test/datastructures/persistentArray.cpp
@@ -24,19 +24,19 @@ void stress_test() {
cur[j] = x;
expected.emplace_back(t, cur);
} else if (op <= 16) {
- if (sz(expected) < 1) continue;
- int j = Random::integer<int>(0, sz(expected));
+ if (ssize(expected) < 1) continue;
+ int j = Random::integer<int>(0, ssize(expected));
for (int k = 0; k < m; k++) {
if (got.get(k, expected[j].first) != expected[j].second[k]) cerr << "got: " << got.get(k, expected[j].first) << ", expected: " << expected[j].second[k] << FAIL;
}
} else {
- if (sz(expected) < 1) continue;
- int j = Random::integer<int>(0, sz(expected));
+ if (ssize(expected) < 1) continue;
+ int j = Random::integer<int>(0, ssize(expected));
got.reset(expected[j].first);
expected.resize(j + 1);
cur = expected.back().second;
}
-
+
}
queries += n;
}
diff --git a/test/datastructures/segmentTree.cpp b/test/datastructures/segmentTree.cpp
index fbac13e..2473724 100644
--- a/test/datastructures/segmentTree.cpp
+++ b/test/datastructures/segmentTree.cpp
@@ -47,7 +47,7 @@ void performance_test1() {
int i = Random::integer<int>(0, N);
auto [l, r] = Random::pair<int>(0, N + 1);
ll x = Random::integer<ll>(-1000, 1000);
-
+
t.start();
tree.update(i, x);
hash ^= tree.query(l, r);
@@ -68,7 +68,7 @@ void stress_test2() {
vector<ll> naive(n);
SegTree tree(naive);
naive = Random::integers<ll>(n, -1000, 1000);
- copy(all(naive), tree.tree.begin() + n);
+ ranges::copy(naive, tree.tree.begin() + n);
for (int operations = 0; operations < 1000; operations++) {
{
int l = Random::integer<int>(0, n + 1);
@@ -102,7 +102,7 @@ void performance_test2() {
int i = Random::integer<int>(0, N);
auto [l, r] = Random::pair<int>(0, N + 1);
ll x = Random::integer<ll>(-1000, 1000);
-
+
t.start();
tree.modify(l, r, x);
hash ^= tree.query(i);
diff --git a/test/datastructures/sparseTable.cpp b/test/datastructures/sparseTable.cpp
index 7577694..843e962 100644
--- a/test/datastructures/sparseTable.cpp
+++ b/test/datastructures/sparseTable.cpp
@@ -8,13 +8,13 @@ void stress_test() {
int n = Random::integer<int>(1, 100);
vector<ll> naive = Random::integers<ll>(n, -1000, 1000);
SparseTable st;
- st.init(&naive);
+ st.init(naive);
for (int operations = 0; operations < 1000; operations++) {
queries++;
int l = Random::integer<int>(0, n+1);
int r = Random::integer<int>(0, n+1);
- ll got = st.queryIdempotent(l, r);
+ ll got = st.query(l, r);
ll expected = r <= l ? -1 : l;
for (int j = l; j < r; j++) {
if (naive[j] < naive[expected]) expected = j;
@@ -31,14 +31,14 @@ void performance_test() {
vector<ll> naive = Random::integers<ll>(N, -1000, 1000);
t.start();
SparseTable st;
- st.init(&naive);
+ st.init(naive);
t.stop();
hash_t hash = 0;
for (int operations = 0; operations < N; operations++) {
auto [l, r] = Random::pair<int>(0, N+1);
-
+
t.start();
- hash += st.queryIdempotent(l, r);
+ hash += st.query(l, r);
t.stop();
}
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
diff --git a/test/datastructures/sparseTableDisjoint.cpp b/test/datastructures/sparseTableDisjoint.cpp
index 77bb005..258f4db 100644
--- a/test/datastructures/sparseTableDisjoint.cpp
+++ b/test/datastructures/sparseTableDisjoint.cpp
@@ -7,7 +7,7 @@ void stress_test() {
int n = Random::integer<int>(1, 100);
vector<ll> naive = Random::integers<ll>(n, -1000, 1000);
DisjointST st;
- st.init(&naive);
+ st.init(naive);
for (int operations = 0; operations < 1000; operations++) {
queries++;
int l = Random::integer<int>(0, n+1);
@@ -28,12 +28,12 @@ void performance_test() {
vector<ll> naive = Random::integers<ll>(N, -1000, 1000);
t.start();
DisjointST st;
- st.init(&naive);
+ st.init(naive);
t.stop();
hash_t hash = 0;
for (int operations = 0; operations < N; operations++) {
auto [l, r] = Random::pair<int>(0, N+1);
-
+
t.start();
hash += st.query(l, r);
t.stop();
diff --git a/test/datastructures/stlHashMap.cpp b/test/datastructures/stlHashMap.cpp
deleted file mode 100644
index 77976fd..0000000
--- a/test/datastructures/stlHashMap.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "../util.h"
-#include <datastructures/stlHashMap.cpp>
-
-int main() {} \ No newline at end of file
diff --git a/test/datastructures/stlPriorityQueue.cpp b/test/datastructures/stlPriorityQueue.cpp
deleted file mode 100644
index 669f4d4..0000000
--- a/test/datastructures/stlPriorityQueue.cpp
+++ /dev/null
@@ -1,6 +0,0 @@
-#include "../util.h"
-#include <datastructures/stlPriorityQueue.cpp>
-
-int main() {
- test();
-} \ No newline at end of file
diff --git a/test/datastructures/stlPriorityQueue.cpp.awk b/test/datastructures/stlPriorityQueue.cpp.awk
deleted file mode 100644
index 99d0fb9..0000000
--- a/test/datastructures/stlPriorityQueue.cpp.awk
+++ /dev/null
@@ -1,37 +0,0 @@
-/auto/ {
- print "void test() {"
- print "pQueue<ll> pq, pq2;"
- print "pq.push(1);"
- print "pq.push(5);"
- print "pq.push(7);"
- print "pq2.push(2);"
- print "pq2.push(4);"
- print "pq2.push(8);"
-}
-END {
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 8) cerr << \"error, got: \" << pq.top() << \", expected: 8\" << FAIL;"
- print "pq.pop();"
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 7) cerr << \"error, got: \" << pq.top() << \", expected: 7\" << FAIL;"
- print "pq.pop();"
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 6) cerr << \"error, got: \" << pq.top() << \", expected: 6\" << FAIL;"
- print "pq.pop();"
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 5) cerr << \"error, got: \" << pq.top() << \", expected: 5\" << FAIL;"
- print "pq.pop();"
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 4) cerr << \"error, got: \" << pq.top() << \", expected: 4\" << FAIL;"
- print "pq.pop();"
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 2) cerr << \"error, got: \" << pq.top() << \", expected: 2\" << FAIL;"
- print "pq.pop();"
- print "if (pq.empty()) cerr << \"error: empty\" << FAIL;"
- print "if (pq.top() != 1) cerr << \"error, got: \" << pq.top() << \", expected: 1\" << FAIL;"
- print "pq.pop();"
- print "if (!pq.empty()) cerr << \"error, got: \" << pq.top() << \", expected: empty\" << FAIL;"
- print "cerr << \"testes example\" << endl;"
- print "}"
-}
-{ print }
diff --git a/test/datastructures/stlRope.cpp b/test/datastructures/stlRope.cpp
index 669f4d4..7405e4e 100644
--- a/test/datastructures/stlRope.cpp
+++ b/test/datastructures/stlRope.cpp
@@ -1,6 +1,6 @@
#include "../util.h"
-#include <datastructures/stlPriorityQueue.cpp>
+#include <datastructures/stlRope.cpp>
int main() {
test();
-} \ No newline at end of file
+}
diff --git a/test/datastructures/stlRope.cpp.awk b/test/datastructures/stlRope.cpp.awk
index e19b8fd..df7c361 100644
--- a/test/datastructures/stlRope.cpp.awk
+++ b/test/datastructures/stlRope.cpp.awk
@@ -20,7 +20,7 @@
print "vector<int> got, expected = {0,1,6,2,3,4,5,7};"
}
END {
- print " got.push_back(*it)"
+ print " got.push_back(*it);"
print "if (got != expected) cerr << \"error\" << endl;"
print "}"
}
diff --git a/test/datastructures/stlTree.cpp b/test/datastructures/stlTree.cpp
deleted file mode 100644
index 7bacbee..0000000
--- a/test/datastructures/stlTree.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "../util.h"
-#include <datastructures/stlTree.cpp>
diff --git a/test/datastructures/treap.cpp b/test/datastructures/treap.cpp
index 2fc9d63..d93e0f4 100644
--- a/test/datastructures/treap.cpp
+++ b/test/datastructures/treap.cpp
@@ -26,14 +26,14 @@ void stress_test(int T, int n) {
if (a.empty()) is_ins = true;
if (is_ins) {
- int ind = Random::integer<int>(0, (int)sz(a)+1);
+ int ind = Random::integer<int>(0, (int)ssize(a)+1);
ll val = Random::integer((ll)-1e18, (ll)1e18+1);
t.insert(ind, val);
a.insert(a.begin() + ind, val);
ins--;
} else {
- int ind = Random::integer<int>(0, (int)sz(a));
- int cnt = Random::integer<int>(1, 1 + min<int>({(int)sz(a)-ind, rem, (int)sqrt(n)}));
+ int ind = Random::integer<int>(0, (int)ssize(a));
+ int cnt = Random::integer<int>(1, 1 + min<int>({(int)ssize(a)-ind, rem, (int)sqrt(n)}));
t.remove(ind, cnt);
a.erase(a.begin() + ind, a.begin() + ind + cnt);
rem -= cnt;
diff --git a/test/datastructures/waveletTree.cpp b/test/datastructures/waveletTree.cpp
index d294835..e70d57b 100644
--- a/test/datastructures/waveletTree.cpp
+++ b/test/datastructures/waveletTree.cpp
@@ -20,7 +20,7 @@ void stress_test() {
ll expected = -1;
if (x >= 0 && l + x < r) {
vector<ll> tmp(naive.begin() + l, naive.begin() + r);
- std::sort(all(tmp));
+ ranges::sort(tmp);
expected = tmp[x];
}
if (got != expected) {
@@ -59,7 +59,7 @@ void performance_test() {
auto [l2, r2] = Random::pair<int>(0, N + 1);
int x1 = Random::integer<ll>(l1, r1 + 1);
ll x2 = Random::integer<ll>(-1000, 1000);
-
+
t.start();
hash ^= tree.kth(l1, r1, x1);
hash ^= tree.countSmaller(l2, r2, x2);
diff --git a/test/fuzz.sh b/test/fuzz.sh
deleted file mode 100755
index c166506..0000000
--- a/test/fuzz.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/bash
-set -e
-cd "$(dirname "$0")"
-
-while true
-do
- seed="0"
- while [[ $seed == 0* ]]; do
- seed=$(tr -dc '0-9' </dev/random | head -c 18)
- done
- echo "Fuzz using seed: $seed"
- echo
- ./test.sh --seed=$seed "$@"
-done
diff --git a/test/geometry.h b/test/geometry.h
index 0167d5c..06520c7 100644
--- a/test/geometry.h
+++ b/test/geometry.h
@@ -26,7 +26,7 @@ namespace Random {
vector<ll> partition(ll n, std::size_t k){//min = 0;
n += k;
vector<ll> res = Random::distinct<ll>(k-1, 1, n);
- sort(all(res));
+ ranges::sort(res);
res.emplace_back(n);
ll last = 0;
for (std::size_t i = 0; i < k; i++) {
@@ -137,4 +137,4 @@ namespace Random {
while (ccw(a, b, c) == 0) c = integerPoint(range);
return {a, b, c};
}
-} \ No newline at end of file
+}
diff --git a/test/geometry/antipodalPoints.cpp b/test/geometry/antipodalPoints.cpp
index d20dfb6..013f43c 100644
--- a/test/geometry/antipodalPoints.cpp
+++ b/test/geometry/antipodalPoints.cpp
@@ -9,7 +9,7 @@ constexpr ll EPS = 0;
#include "../geometry.h"
vector<pair<int, int>> naive(vector<pt> ps) {
- ll n = sz(ps);
+ ll n = ssize(ps);
auto test = [&](int i, int j){
if (dot(ps[j] - ps[i], ps[i - 1] - ps[i]) <= 0) return false;
if (dot(ps[j] - ps[i], ps[i + 1] - ps[i]) <= 0) return false;
@@ -34,13 +34,13 @@ void stress_test(ll range) {
auto got = antipodalPoints(ps);
for (auto& [a, b] : got) if (a > b) swap(a, b);
- sort(all(got));
+ ranges::sort(got);
auto expected = naive(ps);
for (auto& [a, b] : expected) if (a > b) swap(a, b);
for (auto x : expected) {
- auto it = lower_bound(all(got), x);
+ auto it = ranges::lower_bound(got, x);
if (it == got.end() || *it != x) cerr << "error" << FAIL;
}
queries += n;
@@ -58,7 +58,7 @@ void performance_test() {
auto got = antipodalPoints(ps);
t.stop();
- hash_t hash = sz(got);
+ hash_t hash = ssize(got);
if (t.time > 50) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/geometry/circle.cpp b/test/geometry/circle.cpp
index 3d3d27d..dc975ff 100644
--- a/test/geometry/circle.cpp
+++ b/test/geometry/circle.cpp
@@ -46,9 +46,9 @@ void test_circleIntersection(ll range) {
auto got = circleIntersection(c1, r1, c2, r2);
- if (sz(got) != expectedCount(real(c1), imag(c1), r1, real(c2), imag(c2), r2)) cerr << "error: wrong count" << FAIL;
+ if (ssize(got) != expectedCount(real(c1), imag(c1), r1, real(c2), imag(c2), r2)) cerr << "error: wrong count" << FAIL;
- for (int i = 0; i < sz(got); i++) {
+ for (int i = 0; i < ssize(got); i++) {
for (int j = 0; j < i; j++) {
if (abs(got[i] - got[j]) < 1e-6) cerr << "error: identical" << FAIL;
}
@@ -58,7 +58,7 @@ void test_circleIntersection(ll range) {
if (float_error(abs(c1 - p), r1) > 1e-6) cerr << "error: 1" << FAIL;
if (float_error(abs(c2 - p), r2) > 1e-6) cerr << "error: 2" << FAIL;
}
- queries += sz(got);
+ queries += ssize(got);
}
cerr << "tested circleIntersection: " << queries << endl;
}
@@ -91,9 +91,9 @@ void test_circleRayIntersection(ll range) {
else expected = 1;
}
- if (sz(got) != expected) cerr << "error: wrong count" << FAIL;
+ if (ssize(got) != expected) cerr << "error: wrong count" << FAIL;
- for (int i = 0; i < sz(got); i++) {
+ for (int i = 0; i < ssize(got); i++) {
for (int j = 0; j < i; j++) {
if (abs(got[i] - got[j]) < 1e-6) cerr << "error: identical" << FAIL;
}
@@ -103,7 +103,7 @@ void test_circleRayIntersection(ll range) {
if (float_error(abs(c - p), r) > 1e-6) cerr << "error: 1" << FAIL;
if (distToLine(orig, orig + dir, p) > 1e-6) cerr << "error: 2" << FAIL;
}
- queries += sz(got);
+ queries += ssize(got);
}
cerr << "tested circleIntersection: " << queries << endl;
}
diff --git a/test/geometry/closestPair.cpp b/test/geometry/closestPair.cpp
index 5959b21..4e558dc 100644
--- a/test/geometry/closestPair.cpp
+++ b/test/geometry/closestPair.cpp
@@ -13,7 +13,7 @@ ll isqrt(ll x) {return (ll)sqrtl(x);}
//strict convex hull
ll naive(const vector<pt>& ps) {
ll opt = LL::INF;
- for (ll i = 0; i < sz(ps); i++) {
+ for (ll i = 0; i < ssize(ps); i++) {
for (ll j = 0; j < i; j++) {
opt = min(opt, norm(ps[i] - ps[j]));
}
diff --git a/test/geometry/closestPair.double.cpp b/test/geometry/closestPair.double.cpp
index 2f8a1ab..9ef039f 100644
--- a/test/geometry/closestPair.double.cpp
+++ b/test/geometry/closestPair.double.cpp
@@ -10,7 +10,7 @@ constexpr ll INF = LL::INF;
//strict convex hull
double naive(const vector<pt>& ps) {
double opt = LL::INF;
- for (ll i = 0; i < sz(ps); i++) {
+ for (ll i = 0; i < ssize(ps); i++) {
for (ll j = 0; j < i; j++) {
opt = min(opt, norm(ps[i] - ps[j]));
}
diff --git a/test/geometry/convexHull.cpp b/test/geometry/convexHull.cpp
index 788a634..0ca52a2 100644
--- a/test/geometry/convexHull.cpp
+++ b/test/geometry/convexHull.cpp
@@ -9,7 +9,7 @@ constexpr ll EPS = 0;
//strict convex hull
ll isConvexHull(const vector<pt>& ps, const vector<pt>& hull) {
- ll n = sz(hull) - 1;
+ ll n = ssize(hull) - 1;
if (n == 0) {
for (pt p : ps) if (p != hull[0]) return 1;
return 0;
@@ -67,7 +67,7 @@ void performance_test() {
t.start();
auto a = convexHull(ps);
t.stop();
- hash_t hash = sz(a);
+ hash_t hash = ssize(a);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/geometry/delaunay.cpp b/test/geometry/delaunay.cpp
index 5740b95..b824ad8 100644
--- a/test/geometry/delaunay.cpp
+++ b/test/geometry/delaunay.cpp
@@ -6,28 +6,27 @@ auto cross(pt p, pt a, pt b) {return cross(a - p, b - p);}
#pragma GCC diagnostic ignored "-Wunused-variable"
#include <geometry/delaunay.cpp>
+
vector<pt> convexHull(vector<pt> pts){
- sort(all(pts), [](const pt& a, const pt& b){
- return real(a) == real(b) ? imag(a) < imag(b)
- : real(a) < real(b);
- });
- pts.erase(unique(all(pts)), pts.end());
+ ranges::sort(pts, {},
+ [](pt x) { return pair{real(x), imag(x)}; });
+ pts.erase(begin(ranges::unique(pts)), end(pts));
int k = 0;
- vector<pt> h(2 * sz(pts));
- auto half = [&](auto begin, auto end, int t) {
- for (auto it = begin; it != end; it++) {
- while (k > t && cross(h[k-2], h[k-1], *it) < 0) k--; //allow collinear points!
- h[k++] = *it;
+ vector<pt> h(2 * ssize(pts));
+ auto half = [&](auto &&v, int t) {
+ for (auto x: v) {
+ while (k > t && cross(h[k-2], h[k-1], x) < 0) k--; // allow collinear points
+ h[k++] = x;
}};
- half(all(pts), 1); // Untere Hülle.
- half(next(pts.rbegin()), pts.rend(), k); // Obere Hülle.
+	half(pts, 1); // Lower hull.
+	half(pts | views::reverse | views::drop(1), k); // Upper hull
h.resize(k);
return h;
}
lll area(const vector<pt>& poly) { //poly[0] == poly.back()
lll res = 0;
- for (int i = 0; i + 1 < sz(poly); i++)
+ for (int i = 0; i + 1 < ssize(poly); i++)
res += cross(poly[i], poly[i + 1]);
return res;
}
@@ -89,15 +88,15 @@ void stress_test(ll range) {
hull.pop_back();
auto got = delaunay(ps);
- if (sz(got) % 3 != 0) cerr << "error: not triangles" << FAIL;
- if (sz(got) / 3 + sz(hull) - 3 + 1 != 2 * sz(ps) - 4) cerr << "error: wrong number" << FAIL;
+ if (ssize(got) % 3 != 0) cerr << "error: not triangles" << FAIL;
+ if (ssize(got) / 3 + ssize(hull) - 3 + 1 != 2 * ssize(ps) - 4) cerr << "error: wrong number" << FAIL;
//all triangles should be oriented ccw
lll gotArea = 0;
- for (int i = 0; i < sz(got); i += 3) gotArea += cross(got[i], got[i+1], got[i+2]);
+ for (int i = 0; i < ssize(got); i += 3) gotArea += cross(got[i], got[i+1], got[i+2]);
if (gotArea != expectedArea) cerr << "error: wrong area" << FAIL;
- for (int i = 0; i < sz(got); i++) {
+ for (int i = 0; i < ssize(got); i++) {
int ii = i + 1;
if (i / 3 != ii / 3) ii -= 3;
for (int j = 0; j < i; j++) {
@@ -111,7 +110,7 @@ void stress_test(ll range) {
for (pt p : ps) seen |= p == got[i];
if (!seen) cerr << "error: invalid point" << FAIL;
}
- for (int i = 0; i < sz(got); i += 3) {
+ for (int i = 0; i < ssize(got); i += 3) {
for (pt p : ps) {
if (p == got[i]) continue;
if (p == got[i+1]) continue;
@@ -131,7 +130,7 @@ void performance_test() {
t.start();
auto got = delaunay(ps);
t.stop();
- hash_t hash = sz(got);
+ hash_t hash = ssize(got);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/geometry/formulas.cpp b/test/geometry/formulas.cpp
index d63d431..f472e1f 100644
--- a/test/geometry/formulas.cpp
+++ b/test/geometry/formulas.cpp
@@ -107,7 +107,7 @@ void test_uniqueAngle(ll range) {
if (it->second != expected) cerr << "error: inconsistent" << FAIL;
queries++;
}
- cerr << "tested uniqueAngle: " << queries << " (" << sz(seen) << ")" << endl;
+ cerr << "tested uniqueAngle: " << queries << " (" << ssize(seen) << ")" << endl;
}
int main() {
diff --git a/test/geometry/hpi.cpp b/test/geometry/hpi.cpp
index a2326bc..e22e8c6 100644
--- a/test/geometry/hpi.cpp
+++ b/test/geometry/hpi.cpp
@@ -1,4 +1,6 @@
#include "../util.h"
+#define sz(X) (ll)::size(X)
+#define all(X) ::begin(X), ::end(X)
constexpr ll EPS = 0;
#define double ll
#define polar polar<ll>
@@ -14,10 +16,10 @@ ll sgn(ll x) {
//https://cp-algorithms.com/geometry/halfplane-intersection.html
namespace cpalgo {
// Redefine epsilon and infinity as necessary. Be mindful of precision errors.
- const long double eps = 1e-9, inf = 1e9;
+ const long double eps = 1e-9, inf = 1e9;
// Basic point/vector struct.
- struct Point {
+ struct Point {
long double x, y;
explicit Point(long double x_ = 0, long double y_ = 0) : x(x_), y(y_) {}
@@ -26,23 +28,23 @@ namespace cpalgo {
	// Addition, subtraction, multiply by constant, dot product, cross product.
friend Point operator + (const Point& p, const Point& q) {
- return Point(p.x + q.x, p.y + q.y);
+ return Point(p.x + q.x, p.y + q.y);
}
- friend Point operator - (const Point& p, const Point& q) {
- return Point(p.x - q.x, p.y - q.y);
+ friend Point operator - (const Point& p, const Point& q) {
+ return Point(p.x - q.x, p.y - q.y);
}
- friend Point operator * (const Point& p, const long double& k) {
- return Point(p.x * k, p.y * k);
- }
+ friend Point operator * (const Point& p, const long double& k) {
+ return Point(p.x * k, p.y * k);
+ }
friend long double dot(const Point& p, const Point& q) {
return p.x * q.x + p.y * q.y;
}
- friend long double cross(const Point& p, const Point& q) {
- return p.x * q.y - p.y * q.x;
+ friend long double cross(const Point& p, const Point& q) {
+ return p.x * q.y - p.y * q.x;
}
friend std::ostream& operator<<(std::ostream& os, const Point& p) {
@@ -53,10 +55,10 @@ namespace cpalgo {
};
// Basic half-plane struct.
- struct Halfplane {
+ struct Halfplane {
// 'p' is a passing point of the line and 'pq' is the direction vector of the line.
- Point p, pq;
+ Point p, pq;
long double angle;
Halfplane() {}
@@ -66,16 +68,16 @@ namespace cpalgo {
Halfplane(array<pt, 2> ps) : Halfplane(ps[0], ps[1]) {}
Halfplane(hp h) : Halfplane(h.from, h.to) {}
- // Check if point 'r' is outside this half-plane.
+ // Check if point 'r' is outside this half-plane.
// Every half-plane allows the region to the LEFT of its line.
bool out(const Point& r) {
- return cross(pq, r - p) < -eps;
+ return cross(pq, r - p) < -eps;
}
- // Comparator for sorting.
- bool operator < (const Halfplane& e) const {
+ // Comparator for sorting.
+ bool operator < (const Halfplane& e) const {
return angle < e.angle;
- }
+ }
// Intersection point of the lines of two half-planes. It is assumed they're never parallel.
friend Point inter(const Halfplane& s, const Halfplane& t) {
@@ -89,13 +91,13 @@ namespace cpalgo {
};
// Actual algorithm
- vector<Point> hp_intersect(vector<Halfplane>& H) {
+ vector<Point> hp_intersect(vector<Halfplane>& H) {
/*Point box[4] = { // Bounding box in CCW order
- Point(inf, inf),
- Point(-inf, inf),
- Point(-inf, -inf),
- Point(inf, -inf)
+ Point(inf, inf),
+ Point(-inf, inf),
+ Point(-inf, -inf),
+ Point(inf, -inf)
};
for(int i = 0; i<4; i++) { // Add bounding box half-planes.
@@ -181,7 +183,7 @@ void test_check(ll range) {
auto b = Random::line(range);
auto c = b;
while (cross(b[0] - b[1], c[0] - c[1]) == 0) c = Random::line(range);
-
+
bool got = hp(a[0], a[1]).check(hp(b[0], b[1]), hp(c[0], c[1]));
bool expected = naiveCheck(a, b, c);
diff --git a/test/geometry/polygon.cpp b/test/geometry/polygon.cpp
index 643ea70..29d3251 100644
--- a/test/geometry/polygon.cpp
+++ b/test/geometry/polygon.cpp
@@ -135,7 +135,7 @@ void test_insideConvex(ll range) {
// convex hull without duplicates, h[0] != h.back()
// apply comments if border counts as inside
bool insideOrOnConvex(pt p, const vector<pt>& hull) {
- int l = 0, r = sz(hull) - 1;
+ int l = 0, r = ssize(hull) - 1;
if (cross(hull[0], hull[r], p) > 0) return false;
while (l + 1 < r) {
int m = (l + r) / 2;
@@ -155,7 +155,7 @@ void test_minkowski(ll range) {
auto got = minkowski(A, B);
bool convex = true;
- for (int i = 0; i < sz(got); i++) convex &= cross(got[i], got[(i+1) % sz(got)], got[(i+2) % sz(got)]) >= 0;
+ for (int i = 0; i < ssize(got); i++) convex &= cross(got[i], got[(i+1) % ssize(got)], got[(i+2) % ssize(got)]) >= 0;
if (!convex) cerr << "error: not convex" << FAIL;
for (pt a : A) {
@@ -172,19 +172,19 @@ double naive_dist(const vector<pt>& ps, const vector<pt>& qs) {
//check if intersect
double res = LD::INF;
bool intersect = true;
- for (int i = 0; i < sz(qs); i++) {
+ for (int i = 0; i < ssize(qs); i++) {
bool sep = true;
for (pt p : ps) {
- res = min(res, distToSegment(qs[i], qs[(i+1) % sz(qs)], p));
- sep &= cross(qs[i], qs[(i+1) % sz(qs)], p) <= 0;
+ res = min(res, distToSegment(qs[i], qs[(i+1) % ssize(qs)], p));
+ sep &= cross(qs[i], qs[(i+1) % ssize(qs)], p) <= 0;
}
if (sep) intersect = false;
}
- for (int i = 0; i < sz(ps); i++) {
+ for (int i = 0; i < ssize(ps); i++) {
bool sep = true;
for (pt q : qs) {
- res = min(res, distToSegment(ps[i], ps[(i+1) % sz(ps)], q));
- sep &= cross(ps[i], ps[(i+1) % sz(ps)], q) <= 0;
+ res = min(res, distToSegment(ps[i], ps[(i+1) % ssize(ps)], q));
+ sep &= cross(ps[i], ps[(i+1) % ssize(ps)], q) <= 0;
}
if (sep) intersect = false;
}
@@ -263,10 +263,10 @@ void test_intersect(ll range) {
}
}
}
- if (sz(expected) > 1 && expected[0] == expected[1]) expected.pop_back();
+ if (ssize(expected) > 1 && expected[0] == expected[1]) expected.pop_back();
- sort(all(got));
- sort(all(expected));
+ ranges::sort(got);
+ ranges::sort(expected);
if (got != expected) cerr << "error" << FAIL;
diff --git a/test/geometry/segmentIntersection.cpp b/test/geometry/segmentIntersection.cpp
index 6d3ddd6..5271563 100644
--- a/test/geometry/segmentIntersection.cpp
+++ b/test/geometry/segmentIntersection.cpp
@@ -40,7 +40,7 @@ vector<seg> randomSegs(int n, ll range) {
}
bool naive(vector<seg>& segs) {
- for (ll i = 0; i < sz(segs); i++) {
+ for (ll i = 0; i < ssize(segs); i++) {
for (ll j = 0; j < i; j++) {
if (segmentIntersection(segs[i].a, segs[i].b, segs[j].a, segs[j].b)) return true;
}
diff --git a/test/geometry/sortAround.cpp b/test/geometry/sortAround.cpp
index a27edc8..42ea86b 100644
--- a/test/geometry/sortAround.cpp
+++ b/test/geometry/sortAround.cpp
@@ -24,7 +24,7 @@ void test_tiny() {
};
auto got = expected;
for (int i = 0; i < 100'000; i++) {
- shuffle(all(got), Random::rng);
+ ranges::shuffle(got, Random::rng);
sortAround(0, got);
if (got != expected) cerr << "error" << FAIL;
}
@@ -51,8 +51,8 @@ void stress_test(ll range) {
auto isLeft = [&](pt p){return real(p - c) < 0 || (real(p - c) == 0 && imag(p - c) < 0);};
auto isCCW = [&](pt a, pt b){return cross(c, a, b) > 0;};
- if (!is_partitioned(all(ps), isLeft)) cerr << "error 1" << FAIL;
- auto mid = partition_point(all(ps), isLeft);
+ if (!ranges::is_partitioned(ps, isLeft)) cerr << "error 1" << FAIL;
+ auto mid = ranges::partition_point(ps, isLeft);
if (!is_sorted(ps.begin(), mid, isCCW)) cerr << "error 2" << FAIL;
if (!is_sorted(mid, ps.end(), isCCW)) cerr << "error 3" << FAIL;
queries += n;
diff --git a/test/graph/TSP.cpp b/test/graph/TSP.cpp
index f9aab2e..8a67409 100644
--- a/test/graph/TSP.cpp
+++ b/test/graph/TSP.cpp
@@ -7,9 +7,9 @@ constexpr ll INF = LL::INF;
#include <graph/TSP.cpp>
vector<int> naive() {
- int n = sz(dist);
+ int n = ssize(dist);
vector<int> todo(n - 1);
- iota(all(todo), 1);
+ iota(begin(todo), end(todo), 1);
vector<int> res;
ll best = LL::INF;
do {
@@ -26,7 +26,7 @@ vector<int> naive() {
res.insert(res.begin(), 0);
res.push_back(0);
}
- } while (next_permutation(all(todo)));
+ } while (ranges::next_permutation(todo).found);
return res;
}
@@ -39,7 +39,7 @@ void stress_test() {
auto expected = naive();
auto got = TSP();
-
+
if (got != expected) cerr << "error" << FAIL;
queries += n;
}
diff --git a/test/graph/articulationPoints.bcc.cpp b/test/graph/articulationPoints.bcc.cpp
index 15f5cf2..cee2d0b 100644
--- a/test/graph/articulationPoints.bcc.cpp
+++ b/test/graph/articulationPoints.bcc.cpp
@@ -10,9 +10,9 @@ struct edge {
vector<vector<int>> naiveBCC(int m) {
init(m);
- vector<int> seen(sz(adj), -1);
+ vector<int> seen(ssize(adj), -1);
int run = 0;
- for (int i = 0; i < sz(adj); i++) {
+ for (int i = 0; i < ssize(adj); i++) {
for (auto e : adj[i]) {
run++;
seen[i] = run;
@@ -36,9 +36,9 @@ vector<vector<int>> naiveBCC(int m) {
for (int i = 0; i < m; i++) {
res[findSet(i)].push_back(i);
}
- for (auto& v : res) sort(all(v));
- res.erase(remove_if(all(res), [](const vector<int>& v){return sz(v) <= 1;}), res.end());
- sort(all(res));
+ for (auto& v : res) ranges::sort(v);
+ res.erase(begin(ranges::remove_if(res, [](const vector<int>& v){return ssize(v) <= 1;})), end(res));
+ ranges::sort(res);
return res;
}
@@ -60,12 +60,12 @@ void stress_test_bcc() {
auto expected = naiveBCC(nextId);
find();
- vector<vector<int>> got(sz(bcc));
- for (int i = 0; i < sz(bcc); i++) {
+ vector<vector<int>> got(ssize(bcc));
+ for (int i = 0; i < ssize(bcc); i++) {
for (auto e : bcc[i]) got[i].push_back(e.id);
- sort(all(got[i]));
+ ranges::sort(got[i]);
}
- sort(all(got));
+ ranges::sort(got);
if (got != expected) cerr << "error" << FAIL;
queries += n;
diff --git a/test/graph/articulationPoints.bridges.cpp b/test/graph/articulationPoints.bridges.cpp
index a1b89d2..15408ea 100644
--- a/test/graph/articulationPoints.bridges.cpp
+++ b/test/graph/articulationPoints.bridges.cpp
@@ -7,10 +7,10 @@ struct edge {
#undef Edge
vector<bool> naiveBridges(const vector<pair<int, int>>& edges) {
- vector<bool> res(sz(edges));
+ vector<bool> res(ssize(edges));
- vector<int> seen(sz(adj), -1);
- for (int i = 0; i < sz(edges); i++) {
+ vector<int> seen(ssize(adj), -1);
+ for (int i = 0; i < ssize(edges); i++) {
auto [a, b] = edges[i];
vector<int> todo = {a};
seen[a] = i;
@@ -40,14 +40,14 @@ void stress_test_bridges() {
adj.assign(n, {});
vector<pair<int, int>> edges;
g.forEdges([&](int a, int b){
- adj[a].push_back({a, b, sz(edges)});
- adj[b].push_back({b, a, sz(edges)});
+ adj[a].push_back({a, b, ssize(edges)});
+ adj[b].push_back({b, a, ssize(edges)});
edges.emplace_back(a, b);
});
auto expected = naiveBridges(edges);
find();
- vector<bool> got(sz(edges));
+ vector<bool> got(ssize(edges));
for (auto e : bridges) {
		if (got[e.id]) cerr << "error: duplicate" << FAIL;
got[e.id] = true;
diff --git a/test/graph/articulationPoints.cpp b/test/graph/articulationPoints.cpp
index 2567a09..c06f3a3 100644
--- a/test/graph/articulationPoints.cpp
+++ b/test/graph/articulationPoints.cpp
@@ -7,10 +7,10 @@ struct edge {
#undef Edge
vector<bool> naiveArt() {
- vector<bool> res(sz(adj));
+ vector<bool> res(ssize(adj));
- vector<int> seen(sz(adj), -1);
- for (int i = 0; i < sz(adj); i++) {
+ vector<int> seen(ssize(adj), -1);
+ for (int i = 0; i < ssize(adj); i++) {
if (adj[i].empty()) continue;
seen[i] = i;
vector<ll> todo = {adj[i][0].to};
@@ -72,9 +72,9 @@ void performance_test() {
});
t.start();
- find();
+ find();
t.stop();
- hash_t hash = sz(bridges) + sz(bcc);
+ hash_t hash = ssize(bridges) + ssize(bcc);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/graph/binary_lifting.cpp b/test/graph/binary_lifting.cpp
new file mode 100644
index 0000000..20318da
--- /dev/null
+++ b/test/graph/binary_lifting.cpp
@@ -0,0 +1,60 @@
+#include "../util.h"
+#include <graph/binary_lifting.cpp>
+namespace expected {
+#include <graph/hld.cpp>
+}
+
+void stress_test() {
+ ll queries = 0;
+ for (int tries = 0; tries < 200'000; tries++) {
+ int n = Random::integer<int>(2, 30);
+ Graph<NoData> g(n);
+ g.tree();
+
+ vector<vector<int>> adj(n);
+ g.forEdges([&](int a, int b){
+ adj[a].push_back(b);
+ adj[b].push_back(a);
+ });
+
+ Lift lift(adj, 0);
+
+ expected::adj = adj;
+ expected::init();
+
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j <= i; j++) {
+ auto got = lift.lca(i, j);
+ auto expected = expected::get_lca(i, j);
+ if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
+ }
+ }
+ queries += n;
+ }
+ cerr << "tested random queries: " << queries << endl;
+}
+
+constexpr int N = 1'000'000;
+void performance_test() {
+ timer t;
+ Graph<NoData> g(N);
+ g.tree();
+ vector<vector<int>> adj(N);
+ g.forEdges([&](int a, int b){
+ adj[a].push_back(b);
+ adj[b].push_back(a);
+ });
+
+ hash_t hash = 0;
+ t.start();
+ Lift lift(adj, 0);
+ for (int i = 1; i < N; i++) hash += lift.lca(i-1, i);
+ t.stop();
+ if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
+ cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
+}
+
+int main() {
+ stress_test();
+ performance_test();
+}
diff --git a/test/graph/bronKerbosch.cpp b/test/graph/bronKerbosch.cpp
index 1ccd493..e0cac22 100644
--- a/test/graph/bronKerbosch.cpp
+++ b/test/graph/bronKerbosch.cpp
@@ -9,7 +9,7 @@ void naive(bits mask = {}, int l = 0) {
if (mask[i]) continue;
if ((adj[i] & mask) == mask) maximal = false;
}
- for (; l < sz(adj); l++) {
+ for (; l < ssize(adj); l++) {
if ((adj[l] & mask) == mask) {
maximal = false;
mask[l] = 1;
@@ -37,10 +37,10 @@ void stress_test() {
naiveCliques.clear();
naive();
- sort(all(cliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();});
- sort(all(naiveCliques), [](bits a, bits b){return a.to_ullong() < b.to_ullong();});
+ ranges::sort(cliques, {}, [](bits x) { return x.to_ullong(); });
+ ranges::sort(naiveCliques, {}, [](bits x) { return x.to_ullong(); });
- if (cliques != naiveCliques) cerr << "got: " << sz(cliques) << ", expected: " << sz(naiveCliques) << FAIL;
+ if (cliques != naiveCliques) cerr << "got: " << ssize(cliques) << ", expected: " << ssize(naiveCliques) << FAIL;
queries += n;
}
cerr << "tested random queries: " << queries << endl;
@@ -62,7 +62,7 @@ void performance_test() {
bronKerbosch();
t.stop();
- hash_t hash = sz(cliques);
+ hash_t hash = ssize(cliques);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/graph/centroid.cpp b/test/graph/centroid.cpp
index 41d9d0f..633c3f1 100644
--- a/test/graph/centroid.cpp
+++ b/test/graph/centroid.cpp
@@ -13,9 +13,9 @@ int subtreeSize(int c, int p) {
vector<int> naive() {
vector<int> res;
- for (int i = 0; i < sz(adj); i++) {
+ for (int i = 0; i < ssize(adj); i++) {
bool isCentroid = true;
- for (int j : adj[i]) isCentroid &= 2*subtreeSize(j, i) <= sz(adj);
+ for (int j : adj[i]) isCentroid &= 2*subtreeSize(j, i) <= ssize(adj);
if (isCentroid) res.push_back(i);
}
return res;
@@ -33,16 +33,16 @@ void stress_test() {
adj[a].push_back(b);
adj[b].push_back(a);
});
-
+
auto expected = naive();
- sort(all(expected));
+ ranges::sort(expected);
for (int i = 0; i < n; i++) {
auto [a, b] = find_centroid(i);
vector<int> got;
if (a >= 0) got.push_back(a);
if (b >= 0) got.push_back(b);
- sort(all(got));
+ ranges::sort(got);
if (got != expected) cerr << "error" << FAIL;
}
@@ -63,7 +63,7 @@ void performance_test() {
adj[b].push_back(a);
});
- t.start();
+ t.start();
auto [gotA, gotB] = find_centroid();
t.stop();
hash_t hash = gotA + gotB;
diff --git a/test/graph/connect.cpp b/test/graph/connect.cpp
index bba8104..96dc4be 100644
--- a/test/graph/connect.cpp
+++ b/test/graph/connect.cpp
@@ -52,8 +52,8 @@ void stress_test() {
int m = Random::integer<int>(30, 300);
vector<int> insertOrder(m);
- iota(all(insertOrder), 0);
- shuffle(all(insertOrder), Random::rng);
+ iota(begin(insertOrder), end(insertOrder), 0);
+ ranges::shuffle(insertOrder, Random::rng);
vector<pair<int, int>> edges(m, {-1, -1});
connect con(n, m);
@@ -104,15 +104,15 @@ void performance_test() {
t.stop();
vector<int> insertOrder(M);
- iota(all(insertOrder), 0);
- shuffle(all(insertOrder), Random::rng);
+ iota(begin(insertOrder), end(insertOrder), 0);
+ ranges::shuffle(insertOrder, Random::rng);
vector<bool> inserted(M);
for (int i = 0, j = 0; i < N; i++) {
int a = Random::integer<int>(0, N);
int b = a;
while (b == a) b = Random::integer<int>(0, N);
-
+
t.start();
con.addEdge(a, b, insertOrder[i]);
t.stop();
diff --git a/test/graph/cycleCounting.cpp b/test/graph/cycleCounting.cpp
index 8e53aec..9c7bf0c 100644
--- a/test/graph/cycleCounting.cpp
+++ b/test/graph/cycleCounting.cpp
@@ -4,11 +4,11 @@
int naive(const vector<pair<int, int>>& edges, int n) {
int res = 0;
- for (int i = 1; i < (1ll << sz(edges)); i++) {
+ for (int i = 1; i < (1ll << ssize(edges)); i++) {
vector<int> deg(n);
init(n);
int cycles = 0;
- for (int j = 0; j < sz(edges); j++) {
+ for (int j = 0; j < ssize(edges); j++) {
if (((i >> j) & 1) != 0) {
auto [a, b] = edges[j];
deg[a]++;
@@ -66,7 +66,7 @@ void performance_test() {
t.start();
hash_t hash = cyc.count();
- cerr << sz(cyc.base) << endl;
+ cerr << ssize(cyc.base) << endl;
t.stop();
if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
diff --git a/test/graph/dijkstra.cpp b/test/graph/dijkstra.cpp
index c0cfb7e..18420ac 100644
--- a/test/graph/dijkstra.cpp
+++ b/test/graph/dijkstra.cpp
@@ -13,21 +13,21 @@ void stress_test() {
int n = Random::integer<int>(2, 30);
int m = Random::integer<int>(n-1, max<int>(n, min<int>(500, n*(n-1) / 2 + 1)));
- vector<vector<path>> adj(n);
+ vector<vector<pair<int, ll>>> adj(n);
vector<edge> edges;
Graph<NoData, true> g(n);
g.erdosRenyi(m);
g.forEdges([&](int a, int b){
ll w = Random::integer<ll>(1, 1'000'000'000'000ll);
- adj[a].push_back({w, b});
+ adj[a].emplace_back(b, w);
edges.push_back({a, b, w});
});
for (int i = 0; i < n; i++) {
auto got = dijkstra(adj, i);
auto expected = bellmannFord(n, edges, i);
-
+
if (got != expected) cerr << "error" << FAIL;
queries += n;
}
@@ -41,12 +41,12 @@ void performance_test() {
timer t;
Graph<NoData> g(N);
g.erdosRenyi(M);
- vector<vector<path>> adj(N);
+ vector<vector<pair<int, ll>>> adj(N);
g.forEdges([&](int a, int b){
ll w1 = Random::integer<ll>(1, 1'000'000'000'000ll);
ll w2 = Random::integer<ll>(1, 1'000'000'000'000ll);
- adj[a].push_back({w1, b});
- adj[b].push_back({w2, a});
+ adj[a].emplace_back(b, w1);
+ adj[b].emplace_back(a, w2);
});
t.start();
diff --git a/test/graph/euler.cpp b/test/graph/euler.cpp
index 457ca99..353cff2 100644
--- a/test/graph/euler.cpp
+++ b/test/graph/euler.cpp
@@ -20,7 +20,7 @@ Euler eulerGraph(int n, int m) {
}
int last = -1;
for (int i = 0; i < n; i++) {
- if (sz(res.adj[i]) % 2 != 0) {
+ if (ssize(res.adj[i]) % 2 != 0) {
if (last >= 0) {
res.addEdge(last, i);
last = -1;
@@ -41,25 +41,25 @@ void stress_test() {
int m = Random::integer<int>(n-1, 200);
auto g = eulerGraph(n, m);
-
+
vector<vector<int>> expected(n);
for (int i = 0; i < n; i++) {
for (auto [j, rev] : g.adj[i]) {
expected[i].push_back(j);
}
- sort(all(expected[i]));
+ ranges::sort(expected[i]);
}
g.euler(0);
vector<vector<int>> got(n);
if (g.cycle.front() != g.cycle.back()) cerr << "error: not cyclic" << FAIL;
- for (int i = 1; i < sz(g.cycle); i++) {
+ for (int i = 1; i < ssize(g.cycle); i++) {
int a = g.cycle[i-1];
int b = g.cycle[i];
got[a].push_back(b);
got[b].push_back(a);
}
- for (auto& v : got) sort(all(v));
+ for (auto& v : got) ranges::sort(v);
if (got != expected) cerr << "error" << FAIL;
queries += n;
diff --git a/test/graph/floydWarshall.cpp b/test/graph/floydWarshall.cpp
index a93a9ea..182b99b 100644
--- a/test/graph/floydWarshall.cpp
+++ b/test/graph/floydWarshall.cpp
@@ -40,7 +40,7 @@ void stress_test() {
if (path.empty()) continue;
if (path.front() != i) cerr << "error: start" << FAIL;
if (path.back() != k) cerr << "error: end" << FAIL;
- for (int l = 1; l < sz(path); l++) {
+ for (int l = 1; l < ssize(path); l++) {
if (floydWarshall::dist[i][path[l-1]] +
orig[path[l-1]][path[l]] +
floydWarshall::dist[path[l]][k] !=
@@ -52,7 +52,7 @@ void stress_test() {
for (int i = 0; i < n; i++) {
auto got = floydWarshall::dist[i];
auto expected = bellmannFord(n, edges, i);
-
+
if (got != expected) cerr << "error" << FAIL;
queries += n;
}
diff --git a/test/graph/havelHakimi.cpp b/test/graph/havelHakimi.cpp
index 71476ec..9491db2 100644
--- a/test/graph/havelHakimi.cpp
+++ b/test/graph/havelHakimi.cpp
@@ -13,11 +13,11 @@ void stress_test() {
for (int i = 0; i < n; i++) expected[i] = g.deg(i);
auto res = havelHakimi(expected);
- if (sz(res) != n) cerr << "error: wrong number of nodes" << FAIL;
+ if (ssize(res) != n) cerr << "error: wrong number of nodes" << FAIL;
vector<vector<int>> rev(n);
vector<int> got(n);
for (int i = 0; i < n; i++) {
- got[i] = sz(res[i]);
+ got[i] = ssize(res[i]);
for (int j : res[i]) {
if (j < 0 || j >= n) cerr << "error: invalid edge" << FAIL;
rev[j].push_back(i);
@@ -25,11 +25,11 @@ void stress_test() {
}
for (int i = 0; i < n; i++) {
- sort(all(res[i]));
- sort(all(rev[i]));
+ ranges::sort(res[i]);
+ ranges::sort(rev[i]);
if (res[i] != rev[i]) cerr << "error: graph is directed" << FAIL;
for (int j : res[i]) if (j == i) cerr << "error: graph has loop" << FAIL;
- for (int j = 1; j < sz(res[i]); j++) {
+ for (int j = 1; j < ssize(res[i]); j++) {
if (res[i][j] == res[i][j-1]) cerr << "error: multiedge" << FAIL;
}
}
@@ -54,7 +54,7 @@ void performance_test() {
auto res = havelHakimi(expected);
t.stop();
hash_t hash = 0;
- for (auto& v : res) hash += sz(v);
+ for (auto& v : res) hash += ssize(v);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/graph/hopcroftKarp.cpp b/test/graph/hopcroftKarp.cpp
index 05599dd..df2cec2 100644
--- a/test/graph/hopcroftKarp.cpp
+++ b/test/graph/hopcroftKarp.cpp
@@ -1,6 +1,6 @@
#include "../util.h"
namespace kuhn {
-#include <graph/maxCarBiMatch.cpp>
+#include <graph/kuhn.cpp>
}
namespace hk {
#include <graph/hopcroftKarp.cpp>
diff --git a/test/graph/maxCarBiMatch.cpp b/test/graph/kuhn.cpp
index 6d7fad0..8b7e13b 100644
--- a/test/graph/maxCarBiMatch.cpp
+++ b/test/graph/kuhn.cpp
@@ -1,6 +1,6 @@
#include "../util.h"
namespace kuhn {
-#include <graph/maxCarBiMatch.cpp>
+#include <graph/kuhn.cpp>
}
namespace hk {
#include <graph/hopcroftKarp.cpp>
diff --git a/test/graph/reroot.cpp b/test/graph/reroot.cpp
index d5043b4..93f946b 100644
--- a/test/graph/reroot.cpp
+++ b/test/graph/reroot.cpp
@@ -47,7 +47,7 @@ void performance_test() {
t.start();
Reroot re;
auto ans = re.solve();
- hash = accumulate(all(ans), 0LL);
+ hash = accumulate(begin(ans), end(ans), 0LL);
t.stop();
if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
diff --git a/test/graph/stoerWagner.cpp b/test/graph/stoerWagner.cpp
index 2003f09..e7a1075 100644
--- a/test/graph/stoerWagner.cpp
+++ b/test/graph/stoerWagner.cpp
@@ -13,7 +13,7 @@ namespace pushRelabel {
#include <graph/pushRelabel.cpp>
ll minCut() {
ll res = INF;
- for (int i = 0; i < sz(adj); i++) {
+ for (int i = 0; i < ssize(adj); i++) {
for (int j = 0; j < i; j++) {
if (i == j) continue;
res = min(res, maxFlow(i, j));
@@ -48,7 +48,7 @@ void stress_test() {
ll got = stoerWagner::stoer_wagner();
ll expected = pushRelabel::minCut();
-
+
if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
queries += n;
}
diff --git a/test/graph/treeIsomorphism.cpp b/test/graph/treeIsomorphism.cpp
index 97f4df4..4daa373 100644
--- a/test/graph/treeIsomorphism.cpp
+++ b/test/graph/treeIsomorphism.cpp
@@ -45,7 +45,7 @@ void stress_test_eq() {
void test_tiny() {
vector<int> expected = {1,1,1,1,2,3,6,11,23}; //#A000055
- for (int i = 1; i < sz(expected); i++) {
+ for (int i = 1; i < ssize(expected); i++) {
set<pair<int, int>> got;
tree t(i);
@@ -63,9 +63,9 @@ void test_tiny() {
got.insert(t.treeLabel());
}
- if (sz(got) != expected[i]) cerr << i << ", got: " << sz(got) << ", expected: " << expected[i] << FAIL;
+ if (ssize(got) != expected[i]) cerr << i << ", got: " << ssize(got) << ", expected: " << expected[i] << FAIL;
}
- cerr << "tested tiny: " << sz(expected) << endl;
+ cerr << "tested tiny: " << ssize(expected) << endl;
}
void stress_test_neq() {
@@ -110,7 +110,7 @@ void performance_test() {
tt.adj[b].push_back(a);
});
- t.start();
+ t.start();
auto [gotA, gotB] = tt.treeLabel();
t.stop();
hash_t hash = gotA + gotB;
diff --git a/test/graph/virtualTree.cpp b/test/graph/virtualTree.cpp
index d256760..0bd71d9 100644
--- a/test/graph/virtualTree.cpp
+++ b/test/graph/virtualTree.cpp
@@ -21,7 +21,7 @@ int lca(int u, int v) {
}
void init(vector<vector<int>>& adj) {
- int n = (int)sz(adj);
+ int n = (int)ssize(adj);
d.assign(n, 0);
in = par = out = d;
int counter = 0;
@@ -44,7 +44,7 @@ void stress_test() {
vector<int> ind = Random::distinct(Random::integer(1, n+1), 0, n);
auto [idk, tree] = virtualTree(ind);
vector<pair<int, int>> edges;
- for (int i=0; i<sz(idk); i++) for (int v : tree[i]) {
+ for (int i=0; i<ssize(idk); i++) for (int v : tree[i]) {
edges.emplace_back(idk[i], idk[v]);
}
@@ -60,7 +60,7 @@ void stress_test() {
};
dfs(dfs, 0, -1, -1);
- sort(all(edges)), sort(all(edges2));
+ ranges::sort(edges), ranges::sort(edges2);
if (edges != edges2) cerr << "WA edge list does not match" << FAIL;
}
cerr << "tested random 50'000 tests" << endl;
@@ -83,7 +83,7 @@ void performance_test() {
ll hash = 0;
t.start();
auto [idk, tree] = virtualTree(ind);
- hash = accumulate(all(idk), 0LL);
+ hash = accumulate(begin(idk), end(idk), 0LL);
t.stop();
if (t.time > 1000) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
diff --git a/test/math/berlekampMassey.cpp b/test/math/berlekampMassey.cpp
index 58fd143..a9d5709 100644
--- a/test/math/berlekampMassey.cpp
+++ b/test/math/berlekampMassey.cpp
@@ -12,10 +12,10 @@ struct RandomRecurence {
}
ll operator()(ll k){
- while (sz(cache) <= k) {
+ while (ssize(cache) <= k) {
ll cur = 0;
- for (ll i = 0; i < sz(c); i++) {
- cur += (c[i] * cache[sz(cache) - i - 1]) % mod;
+ for (ll i = 0; i < ssize(c); i++) {
+ cur += (c[i] * cache[ssize(cache) - i - 1]) % mod;
}
cur %= mod;
cache.push_back(cur);
@@ -60,7 +60,7 @@ void performance_test() {
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
-
+
int main() {
stress_test();
performance_test();
diff --git a/test/math/bigint.cpp b/test/math/bigint.cpp
index 3fc4ac1..538d0dc 100644
--- a/test/math/bigint.cpp
+++ b/test/math/bigint.cpp
@@ -9,7 +9,7 @@ struct modInt {
stringstream a;
a << x;
string b = a.str();
- for (ll i = b[0] == '-' ? 1 : 0; i < sz(b); i++) {
+ for (ll i = b[0] == '-' ? 1 : 0; i < ssize(b); i++) {
value *= 10;
value += b[i] - '0';
value %= MOD;
@@ -115,7 +115,7 @@ void stress_test() {
}
cerr << "tested random queries: " << queries << endl;
}
-
+
int main() {
stress_test();
}
diff --git a/test/math/binomial0.cpp b/test/math/binomial0.cpp
index 00c04d4..25ee344 100644
--- a/test/math/binomial0.cpp
+++ b/test/math/binomial0.cpp
@@ -14,7 +14,7 @@ void stress_test() {
ll expected = last[j];
if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL;
}
- queries += sz(last);
+ queries += ssize(last);
last.push_back(1);
for (ll j = i; j > 0; j--) {
diff --git a/test/math/binomial1.cpp b/test/math/binomial1.cpp
index f6fe20b..f7d06dd 100644
--- a/test/math/binomial1.cpp
+++ b/test/math/binomial1.cpp
@@ -11,7 +11,7 @@ void stress_test() {
ll expected = last[j];
if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL;
}
- queries += sz(last);
+ queries += ssize(last);
last.push_back(1);
for (ll j = i; j > 0; j--) {
diff --git a/test/math/binomial2.cpp b/test/math/binomial2.cpp
index b55c8af..0b6178e 100644
--- a/test/math/binomial2.cpp
+++ b/test/math/binomial2.cpp
@@ -12,7 +12,7 @@ void stress_test() {
ll expected = last[j];
if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL;
}
- queries += sz(last);
+ queries += ssize(last);
last.push_back(1);
for (ll j = i; j > 0; j--) {
diff --git a/test/math/binomial3.cpp b/test/math/binomial3.cpp
index 4a99689..c4791d0 100644
--- a/test/math/binomial3.cpp
+++ b/test/math/binomial3.cpp
@@ -15,7 +15,7 @@ void stress_test() {
ll expected = last[j];
if (got != expected) cerr << "calc_binom(" << i << ", " << j << "), got: " << got << ", expected: " << expected << FAIL;
}
- queries += sz(last);
+ queries += ssize(last);
last.push_back(1);
for (ll j = i; j > 0; j--) {
diff --git a/test/math/cycleDetection.cpp b/test/math/cycleDetection.cpp
index bf57aed..1694589 100644
--- a/test/math/cycleDetection.cpp
+++ b/test/math/cycleDetection.cpp
@@ -1,5 +1,4 @@
#include "../util.h"
-#include <datastructures/pbds.cpp>
#include <math/cycleDetection.cpp>
pair<ll, ll> naive(ll x0, function<ll(ll)> f) {
diff --git a/test/math/gauss.cpp b/test/math/gauss.cpp
index 6e4759d..eb8f641 100644
--- a/test/math/gauss.cpp
+++ b/test/math/gauss.cpp
@@ -7,10 +7,10 @@ vector<vector<double>> mat;
#include <math/gauss.cpp>
vector<vector<double>> inverseMat(const vector<vector<double>>& m) {
- int n = sz(m);
+ int n = ssize(m);
mat = m;
for (int i = 0; i < n; i++) {
- if (sz(mat[i]) != n) cerr << "error: no square matrix" << FAIL;
+ if (ssize(mat[i]) != n) cerr << "error: no square matrix" << FAIL;
mat[i].resize(2*n);
mat[i][n+i] = 1;
}
@@ -27,10 +27,10 @@ vector<vector<double>> inverseMat(const vector<vector<double>>& m) {
}
vector<vector<double>> mul(const vector<vector<double>>& a, const vector<vector<double>>& b) {
- int n = sz(a);
- int m = sz(b[0]);
- int x = sz(b);
- if (sz(a[0]) != sz(b)) cerr << "error: wrong dimensions" << FAIL;
+ int n = ssize(a);
+ int m = ssize(b[0]);
+ int x = ssize(b);
+ if (ssize(a[0]) != ssize(b)) cerr << "error: wrong dimensions" << FAIL;
vector<vector<double>> res(n, vector<double>(m));
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
@@ -48,21 +48,21 @@ void test_tiny() {
{0, 5, 6, 7},
{0, 0, 8, 9},
};
- if (gauss(sz(mat)) != UNIQUE) cerr << "error: 1" << FAIL;
+ if (gauss(ssize(mat)) != UNIQUE) cerr << "error: 1" << FAIL;
mat = {
{-1, 1, 0, -1},
{ 2, 6, 0, 10},
{ 1, -2, 0, 0},
};
- if (gauss(sz(mat)) != MULTIPLE) cerr << "error: 2" << FAIL;
+ if (gauss(ssize(mat)) != MULTIPLE) cerr << "error: 2" << FAIL;
mat = {
{-1, 1, 0, -1},
{ 2, 6, 0, 10},
{ 1, -2, 0, 1},
};
- if (gauss(sz(mat)) != INCONSISTENT) cerr << "error: 3" << FAIL;
+ if (gauss(ssize(mat)) != INCONSISTENT) cerr << "error: 3" << FAIL;
}
void stress_test_inv() {
diff --git a/test/math/inversions.cpp b/test/math/inversions.cpp
index d2a54b7..fc825e4 100644
--- a/test/math/inversions.cpp
+++ b/test/math/inversions.cpp
@@ -1,10 +1,9 @@
#include "../util.h"
-#include <datastructures/pbds.cpp>
#include <math/inversions.cpp>
ll naive(const vector<ll>& v) {
ll res = 0;
- for (ll i = 0; i < sz(v); i++) {
+ for (ll i = 0; i < ssize(v); i++) {
for (ll j = 0; j < i; j++) {
if (v[j] > v[i]) res++;
}
diff --git a/test/math/inversionsMerge.cpp b/test/math/inversionsMerge.cpp
index acdc555..7d1b0d7 100644
--- a/test/math/inversionsMerge.cpp
+++ b/test/math/inversionsMerge.cpp
@@ -3,7 +3,7 @@
ll naive(const vector<ll>& v) {
ll res = 0;
- for (ll i = 0; i < sz(v); i++) {
+ for (ll i = 0; i < ssize(v); i++) {
for (ll j = 0; j < i; j++) {
if (v[j] > v[i]) res++;
}
@@ -17,7 +17,7 @@ void stress_test() {
int n = Random::integer<int>(1, 100);
vector<ll> v(n);
for (ll j = 0; j < n; j++) v[j] = (j-10) * 100000 + Random::integer<ll>(0, 10000); //values must be unique ):
- shuffle(all(v), Random::rng);
+ ranges::shuffle(v, Random::rng);
ll expected = naive(v);
ll got = mergeSort(v);
if (got != expected) {
diff --git a/test/math/kthperm.cpp b/test/math/kthperm.cpp
index 16691b9..1b3e803 100644
--- a/test/math/kthperm.cpp
+++ b/test/math/kthperm.cpp
@@ -1,5 +1,4 @@
#include "../util.h"
-#include <datastructures/pbds.cpp>
#include <math/kthperm.cpp>
void stress_test() {
@@ -7,13 +6,13 @@ void stress_test() {
for (ll i = 0; i < 10'000; i++) {
int n = Random::integer<int>(1, 100);
vector<ll> expected(n);
- iota(all(expected), 0);
+ iota(begin(expected), end(expected), 0);
ll k = 0;
do {
auto got = kthperm(n, k);
if (got != expected) cerr << "error" << FAIL;
k++;
- } while (k < 100 && next_permutation(all(expected)));
+ } while (k < 100 && ranges::next_permutation(expected).found);
queries += n;
}
cerr << "tested queries: " << queries << endl;
diff --git a/test/math/kthperm_permIndex.cpp b/test/math/kthperm_permIndex.cpp
index d84524e..5e05c73 100644
--- a/test/math/kthperm_permIndex.cpp
+++ b/test/math/kthperm_permIndex.cpp
@@ -1,5 +1,4 @@
#include "../util.h"
-#include <datastructures/pbds.cpp>
#include <math/kthperm.cpp>
#include <math/permIndex.cpp>
diff --git a/test/math/lgsFp.cpp b/test/math/lgsFp.cpp
index 08f8f84..fa2dea3 100644
--- a/test/math/lgsFp.cpp
+++ b/test/math/lgsFp.cpp
@@ -6,10 +6,10 @@ vector<vector<ll>> mat;
constexpr ll mod = 1'000'000'007;
vector<vector<ll>> inverseMat(const vector<vector<ll>>& m) {
- int n = sz(m);
+ int n = ssize(m);
mat = m;
for (int i = 0; i < n; i++) {
- if (sz(mat[i]) != n) cerr << "error: no square matrix" << FAIL;
+ if (ssize(mat[i]) != n) cerr << "error: no square matrix" << FAIL;
mat[i].resize(2*n);
mat[i][n+i] = 1;
}
@@ -26,10 +26,10 @@ vector<vector<ll>> inverseMat(const vector<vector<ll>>& m) {
}
vector<vector<ll>> mul(const vector<vector<ll>>& a, const vector<vector<ll>>& b) {
- int n = sz(a);
- int m = sz(b[0]);
- int x = sz(b);
- if (sz(a[0]) != sz(b)) cerr << "error: wrong dimensions" << FAIL;
+ int n = ssize(a);
+ int m = ssize(b[0]);
+ int x = ssize(b);
+ if (ssize(a[0]) != ssize(b)) cerr << "error: wrong dimensions" << FAIL;
vector<vector<ll>> res(n, vector<ll>(m));
for (int i = 0; i < n; i++) {
for (int j = 0; j < m; j++) {
diff --git a/test/math/linearRecurrence.cpp b/test/math/linearRecurrence.cpp
index f0ebe76..79607ac 100644
--- a/test/math/linearRecurrence.cpp
+++ b/test/math/linearRecurrence.cpp
@@ -6,16 +6,15 @@ vector<ll> mul(const vector<ll>& a, const vector<ll>& b) {
return mulSlow(a, b);
}
-
struct RandomRecurence {
vector<ll> f, c, cache;
RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {}
ll operator()(ll k){
- while (sz(cache) <= k) {
+ while (ssize(cache) <= k) {
ll cur = 0;
- for (ll i = 0; i < sz(c); i++) {
- cur += (c[i] * cache[sz(cache) - i - 1]) % mod;
+ for (ll i = 0; i < ssize(c); i++) {
+ cur += (c[i] * cache[ssize(cache) - i - 1]) % mod;
}
cur %= mod;
cache.push_back(cur);
diff --git a/test/math/linearRecurrenceNTT.cpp b/test/math/linearRecurrenceNTT.cpp
index e03c27e..922d965 100644
--- a/test/math/linearRecurrenceNTT.cpp
+++ b/test/math/linearRecurrenceNTT.cpp
@@ -12,10 +12,10 @@ struct RandomRecurence {
RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {}
ll operator()(ll k){
- while (sz(cache) <= k) {
+ while (ssize(cache) <= k) {
ll cur = 0;
- for (ll i = 0; i < sz(c); i++) {
- cur += (c[i] * cache[sz(cache) - i - 1]) % mod;
+ for (ll i = 0; i < ssize(c); i++) {
+ cur += (c[i] * cache[ssize(cache) - i - 1]) % mod;
}
cur %= mod;
cache.push_back(cur);
diff --git a/test/math/linearRecurrenceOld.cpp b/test/math/linearRecurrenceOld.cpp
index dab2256..70609f0 100644
--- a/test/math/linearRecurrenceOld.cpp
+++ b/test/math/linearRecurrenceOld.cpp
@@ -6,10 +6,10 @@ struct RandomRecurence {
RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {}
ll operator()(ll k){
- while (sz(cache) <= k) {
+ while (ssize(cache) <= k) {
ll cur = 0;
- for (ll i = 0; i < sz(c); i++) {
- cur += (c[i] * cache[sz(cache) - i - 1]) % mod;
+ for (ll i = 0; i < ssize(c); i++) {
+ cur += (c[i] * cache[ssize(cache) - i - 1]) % mod;
}
cur %= mod;
cache.push_back(cur);
diff --git a/test/math/linearSieve.cpp b/test/math/linearSieve.cpp
index 8ea822b..527e729 100644
--- a/test/math/linearSieve.cpp
+++ b/test/math/linearSieve.cpp
@@ -57,7 +57,7 @@ void performance_test() {
timer t;
t.start();
sieve();
- hash_t hash = sz(primes);
+ hash_t hash = ssize(primes);
t.stop();
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
diff --git a/test/math/longestIncreasingSubsequence.cpp b/test/math/longestIncreasingSubsequence.cpp
index 407dafe..befee75 100644
--- a/test/math/longestIncreasingSubsequence.cpp
+++ b/test/math/longestIncreasingSubsequence.cpp
@@ -9,7 +9,7 @@ constexpr ll INF = LL::INF;
template<bool STRICT>
bool isLis(const vector<ll>& a, const vector<int>& lis) {
- for (int i = 1; i < sz(lis); i++) {
+ for (int i = 1; i < ssize(lis); i++) {
if (lis[i-1] >= lis[i]) return false;
if (a[lis[i-1]] > a[lis[i]]) return false;
if (STRICT && a[lis[i-1]] == a[lis[i]]) return false;
@@ -20,12 +20,12 @@ bool isLis(const vector<ll>& a, const vector<int>& lis) {
template<bool STRICT>
vector<int> naive(const vector<ll>& a) {
vector<int> res;
- for (ll i = 1; i < (1ll << sz(a)); i++) {
+ for (ll i = 1; i < (1ll << ssize(a)); i++) {
vector<int> tmp;
- for (ll j = 0; j < sz(a); j++) {
+ for (ll j = 0; j < ssize(a); j++) {
if (((i >> j) & 1) != 0) tmp.push_back(j);
}
- if (sz(tmp) >= sz(res) && isLis<STRICT>(a, tmp)) res = tmp;
+ if (ssize(tmp) >= ssize(res) && isLis<STRICT>(a, tmp)) res = tmp;
}
return res;
}
@@ -56,10 +56,9 @@ void performance_test() {
timer t;
auto a = Random::integers<ll>(N, -10'000, 10'000);
auto b = Random::integers<ll>(N, -10'000, 10'000);
- sort(all(b));
+ ranges::sort(b);
auto c = Random::integers<ll>(N, -10'000, 10'000);
- sort(all(c));
- reverse(all(c));
+ ranges::sort(c | views::reverse);
hash_t hash = 0;
t.start();
hash += lis(a).size();
diff --git a/test/math/matrixPower.cpp b/test/math/matrixPower.cpp
index 4dfb0a8..169ff06 100644
--- a/test/math/matrixPower.cpp
+++ b/test/math/matrixPower.cpp
@@ -7,15 +7,15 @@ struct mat {
mat(int dim = 0, int diag = 1) : m(dim, vector<ll>(dim)) {
for (int i = 0; i < dim; i++) m[i][i] = diag;
}
- mat(const vector<ll> c) : m(sz(c), vector<ll>(sz(c))) {
+ mat(const vector<ll> c) : m(ssize(c), vector<ll>(ssize(c))) {
m[0] = c;
- for (ll i = 1; i < sz(c); i++) {
+ for (ll i = 1; i < ssize(c); i++) {
m[i][i-1] = 1;
}
}
mat operator*(const mat& o) const {
- int dim = sz(m);
+ int dim = ssize(m);
mat res(dim, 0);
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
@@ -29,7 +29,7 @@ struct mat {
}
vector<ll> operator*(const vector<ll>& o) const {
- int dim = sz(m);
+ int dim = ssize(m);
vector<ll> res(dim);
for (int i = 0; i < dim; i++) {
for (int j = 0; j < dim; j++) {
@@ -48,10 +48,10 @@ struct RandomRecurence {
RandomRecurence(int n) : f(Random::integers<ll>(n, 0, mod)), c(Random::integers<ll>(n, 0, mod)), cache(f) {}
ll operator()(ll k){
- while (sz(cache) <= k) {
+ while (ssize(cache) <= k) {
ll cur = 0;
- for (ll i = 0; i < sz(c); i++) {
- cur += (c[i] * cache[sz(cache) - i - 1]) % mod;
+ for (ll i = 0; i < ssize(c); i++) {
+ cur += (c[i] * cache[ssize(cache) - i - 1]) % mod;
}
cur %= mod;
cache.push_back(cur);
@@ -67,13 +67,13 @@ void stress_test() {
RandomRecurence f(n);
precalc(mat(f.c));
auto tmp = f.f;
- reverse(all(tmp));
+ ranges::reverse(tmp);
for (int j = 0; j < 100; j++) {
ll k = Random::integer<ll>(0, 1000);
vector<ll> got = calc(k, tmp);
- vector<ll> expected(sz(f.f));
+ vector<ll> expected(ssize(f.f));
for (ll l = 0; l < n; l++) expected[n - 1 - l] = f(k + l);
if (got != expected) cerr << "error" << FAIL;
@@ -89,7 +89,7 @@ void performance_test() {
timer t;
RandomRecurence f(N);
auto tmp = f.f;
- reverse(all(tmp));
+ ranges::reverse(tmp);
t.start();
precalc(mat(f.c));
diff --git a/test/math/millerRabin.base32.cpp b/test/math/millerRabin.base32.cpp
index 742d353..8c2c79a 100644
--- a/test/math/millerRabin.base32.cpp
+++ b/test/math/millerRabin.base32.cpp
@@ -95,7 +95,7 @@ void extra_tests() {
t.start();
auto got = isPrime(x);
t.stop();
- bool expected = sz(factors) == 1 && factors.begin()->second == 1;
+ bool expected = ssize(factors) == 1 && factors.begin()->second == 1;
if (got != expected) cerr << "error: " << x << FAIL;
}
if (t.time > 10) cerr << "too slow" << FAIL;
diff --git a/test/math/millerRabin.cpp b/test/math/millerRabin.cpp
index fd98586..725b744 100644
--- a/test/math/millerRabin.cpp
+++ b/test/math/millerRabin.cpp
@@ -87,7 +87,7 @@ void extra_tests() {
t.start();
auto got = isPrime(x);
t.stop();
- bool expected = sz(factors) == 1 && factors.begin()->second == 1;
+ bool expected = ssize(factors) == 1 && factors.begin()->second == 1;
if (got != expected) cerr << "error: " << x << FAIL;
}
if (t.time > 10) cerr << "too slow" << FAIL;
diff --git a/test/math/permIndex.cpp b/test/math/permIndex.cpp
index 61d34c8..d68ba3a 100644
--- a/test/math/permIndex.cpp
+++ b/test/math/permIndex.cpp
@@ -1,5 +1,4 @@
#include "../util.h"
-#include <datastructures/pbds.cpp>
#include <math/permIndex.cpp>
void stress_test() {
@@ -7,13 +6,13 @@ void stress_test() {
for (ll i = 0; i < 10'000; i++) {
int n = Random::integer<int>(1, 100);
vector<ll> cur(n);
- iota(all(cur), 0);
+ iota(begin(cur), end(cur), 0);
ll expected = 0;
do {
auto got = permIndex(cur);
if (got != expected) cerr << "got: " << got << ", expected: " << expected << FAIL;
expected++;
- } while (expected < 100 && next_permutation(all(cur)));
+ } while (expected < 100 && ranges::next_permutation(cur).found);
queries += n;
}
cerr << "tested queries: " << queries << endl;
@@ -23,7 +22,7 @@ constexpr int N = 500'000;
void performance_test() {
timer t;
vector<ll> cur(N);
- iota(all(cur), 0);
+ iota(begin(cur), end(cur), 0);
reverse(cur.end() - 10, cur.end());
t.start();
auto hash = permIndex(cur);
diff --git a/test/math/polynomial.cpp b/test/math/polynomial.cpp
index f4a9486..adf3773 100644
--- a/test/math/polynomial.cpp
+++ b/test/math/polynomial.cpp
@@ -11,7 +11,7 @@ poly randomPoly(int deg) {
ll eval(const vector<ll>& p, ll x) {
ll res = 0;
- for (ll i = 0, j = 1; i < sz(p); i++, j = (j * x) % mod) {
+ for (ll i = 0, j = 1; i < ssize(p); i++, j = (j * x) % mod) {
res += j * p[i];
res %= mod;
}
@@ -50,7 +50,7 @@ void test_add() {
auto c = a;
c += b;
- if (sz(c) > sz(a) && sz(c) > sz(b)) cerr << "error: wrong degree" << FAIL;
+ if (ssize(c) > ssize(a) && ssize(c) > ssize(b)) cerr << "error: wrong degree" << FAIL;
for (int i = 0; i <= n + m + 7; i++) {
ll x = Random::integer<ll>(0, mod);
@@ -74,7 +74,7 @@ void test_mul() {
auto b = randomPoly(m);
auto c = a * b;
- if (sz(c) > sz(a) + sz(b) - 1) cerr << "error: wrong degree" << FAIL;
+ if (ssize(c) > ssize(a) + ssize(b) - 1) cerr << "error: wrong degree" << FAIL;
for (int i = 0; i <= n + m + 7; i++) {
ll x = Random::integer<ll>(0, mod);
@@ -97,8 +97,8 @@ void test_shift() {
auto a = randomPoly(n);
auto b = a << m;
- if (sz(b) > sz(a)) cerr << sz(a) << " " << sz(b) << endl;
- if (sz(b) > sz(a)) cerr << "error: wrong degree" << FAIL;
+ if (ssize(b) > ssize(a)) cerr << ssize(a) << " " << ssize(b) << endl;
+ if (ssize(b) > ssize(a)) cerr << "error: wrong degree" << FAIL;
for (int i = 0; i <= n + 7; i++) {
ll x = Random::integer<ll>(0, mod);
@@ -126,8 +126,8 @@ void test_divmod() {
auto b = randomPoly(m);
auto [div, rem] = a.divmod(b);
- if (sz(rem) > sz(b)) cerr << "error: wrong degree (rem)" << FAIL;
- if (sz(div) > 1 + max<ll>(0, sz(a) - sz(b))) cerr << "error: wrong degree (div)" << FAIL;
+ if (ssize(rem) > ssize(b)) cerr << "error: wrong degree (rem)" << FAIL;
+ if (ssize(div) > 1 + max<ll>(0, ssize(a) - ssize(b))) cerr << "error: wrong degree (div)" << FAIL;
for (int i = 0; i <= n + m; i++) {
ll x = Random::integer<ll>(0, mod);
@@ -142,7 +142,7 @@ void test_divmod() {
}
cerr << "tested divmod: " << queries << endl;
}
-
+
int main() {
test_eval();
test_add();
diff --git a/test/math/primeSieve.cpp b/test/math/primeSieve.cpp
index 78a50d2..6bb63f6 100644
--- a/test/math/primeSieve.cpp
+++ b/test/math/primeSieve.cpp
@@ -18,7 +18,7 @@ void stress_test() {
if (got) found.push_back(i);
queries++;
}
- primes.resize(sz(found));
+ primes.resize(ssize(found));
if (primes != found) cerr << "error: primes" << FAIL;
for (int i = 0; i < 1'000'000; i++) {
ll x = Random::integer<ll>(2, N);
@@ -34,7 +34,7 @@ void performance_test() {
timer t;
t.start();
primeSieve();
- hash_t hash = sz(primes);
+ hash_t hash = ssize(primes);
t.stop();
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
diff --git a/test/math/primitiveRoot.cpp b/test/math/primitiveRoot.cpp
index cd0b388..6ad7429 100644
--- a/test/math/primitiveRoot.cpp
+++ b/test/math/primitiveRoot.cpp
@@ -63,7 +63,7 @@ void stress_test2() {
map<ll, int> facts;
factor(x, facts);
if (x % 2 == 0) facts.erase(facts.find(2));
- bool expected = sz(facts) == 1;
+ bool expected = ssize(facts) == 1;
if (x % 4 == 0) expected = false;
if (x == 2 || x == 4) expected = true;
diff --git a/test/math/shortModInv.cpp b/test/math/shortModInv.cpp
index 26960bf..565989c 100644
--- a/test/math/shortModInv.cpp
+++ b/test/math/shortModInv.cpp
@@ -7,7 +7,7 @@ void stress_test() {
ll n = Random::integer<ll>(2, 1'000'000'000);
ll x = 0;
do {
- x = Random::integer<ll>(0, n);
+ x = Random::integer<ll>(0, 1'000'000'000);
} while (gcd(x, n) != 1);
ll y = multInv(x, n);
ll got = (x*y) % n;
diff --git a/test/math/transforms/fft.cpp b/test/math/transforms/fft.cpp
index 858676b..66df1bf 100644
--- a/test/math/transforms/fft.cpp
+++ b/test/math/transforms/fft.cpp
@@ -2,14 +2,14 @@
#include <math/transforms/fft.cpp>
vector<cplx> to_cplx(const vector<ll>& in) {
- vector<cplx> res(sz(in));
- for (int i = 0; i < sz(in); i++) res[i] = in[i];
+ vector<cplx> res(ssize(in));
+ for (int i = 0; i < ssize(in); i++) res[i] = in[i];
return res;
}
vector<ll> from_cplx(const vector<cplx>& in) {
- vector<ll> res(sz(in));
- for (int i = 0; i < sz(in); i++) res[i] = llround(real(in[i]));
+ vector<ll> res(ssize(in));
+ for (int i = 0; i < ssize(in); i++) res[i] = llround(real(in[i]));
return res;
}
diff --git a/test/math/transforms/fftMul.cpp b/test/math/transforms/fftMul.cpp
index 5933864..7887a5e 100644
--- a/test/math/transforms/fftMul.cpp
+++ b/test/math/transforms/fftMul.cpp
@@ -5,21 +5,21 @@
#include <math/transforms/fftMul.cpp>
vector<ll> from_cplx(const vector<cplx>& in) {
- vector<ll> res(sz(in));
- for (int i = 0; i < sz(in); i++) res[i] = llround(real(in[i]));
+ vector<ll> res(ssize(in));
+ for (int i = 0; i < ssize(in); i++) res[i] = llround(real(in[i]));
return res;
}
vector<ll> naive(const vector<ll>& a, const vector<ll>& b) {
vector<ll> res;
for (ll i = 1;; i *= 2) {
- if (sz(a) + sz(b) <= i) {
+ if (ssize(a) + ssize(b) <= i) {
res.resize(i, 0);
break;
}
}
- for (int i = 0; i < sz(a); i++) {
- for (int j = 0; j < sz(b); j++) {
+ for (int i = 0; i < ssize(a); i++) {
+ for (int j = 0; j < ssize(b); j++) {
res[i+j] += a[i] * b[j];
}
}
diff --git a/test/math/transforms/multiplyBitwise.cpp b/test/math/transforms/multiplyBitwise.cpp
index bc73290..8b9eb2f 100644
--- a/test/math/transforms/multiplyBitwise.cpp
+++ b/test/math/transforms/multiplyBitwise.cpp
@@ -6,13 +6,13 @@
vector<ll> naive(const vector<ll>& a, const vector<ll>& b) {
vector<ll> res;
for (ll i = 1;; i *= 2) {
- if (sz(a) <= i && sz(b) <= i) {
+ if (ssize(a) <= i && ssize(b) <= i) {
res.resize(i, 0);
break;
}
}
- for (int i = 0; i < sz(a); i++) {
- for (int j = 0; j < sz(b); j++) {
+ for (int i = 0; i < ssize(a); i++) {
+ for (int j = 0; j < ssize(b); j++) {
res[i&j] += a[i] * b[j];
}
}
diff --git a/test/math/transforms/multiplyFFT.cpp b/test/math/transforms/multiplyFFT.cpp
index 782be1b..61040d0 100644
--- a/test/math/transforms/multiplyFFT.cpp
+++ b/test/math/transforms/multiplyFFT.cpp
@@ -6,13 +6,13 @@
vector<ll> naive(const vector<ll>& a, const vector<ll>& b) {
vector<ll> res;
for (ll i = 1;; i *= 2) {
- if (sz(a) + sz(b) <= i) {
+ if (ssize(a) + ssize(b) <= i) {
res.resize(i, 0);
break;
}
}
- for (int i = 0; i < sz(a); i++) {
- for (int j = 0; j < sz(b); j++) {
+ for (int i = 0; i < ssize(a); i++) {
+ for (int j = 0; j < ssize(b); j++) {
res[i+j] += a[i] * b[j];
}
}
diff --git a/test/math/transforms/multiplyNTT.cpp b/test/math/transforms/multiplyNTT.cpp
index 70fc137..6424c50 100644
--- a/test/math/transforms/multiplyNTT.cpp
+++ b/test/math/transforms/multiplyNTT.cpp
@@ -6,13 +6,13 @@
vector<ll> naive(const vector<ll>& a, const vector<ll>& b) {
vector<ll> res;
for (ll i = 1;; i *= 2) {
- if (sz(a) + sz(b) <= i) {
+ if (ssize(a) + ssize(b) <= i) {
res.resize(i, 0);
break;
}
}
- for (int i = 0; i < sz(a); i++) {
- for (int j = 0; j < sz(b); j++) {
+ for (int i = 0; i < ssize(a); i++) {
+ for (int j = 0; j < ssize(b); j++) {
res[i+j] += a[i] * b[j];
res[i+j] %= mod;
}
diff --git a/test/math/transforms/seriesOperations.cpp b/test/math/transforms/seriesOperations.cpp
index ee30e00..f78541d 100644
--- a/test/math/transforms/seriesOperations.cpp
+++ b/test/math/transforms/seriesOperations.cpp
@@ -24,7 +24,7 @@ namespace reference {//checked against yosupo
}
vector<ll> poly_deriv(vector<ll> a){
- for(int i = 0; i < sz(a)-1; i++)
+ for(int i = 0; i < ssize(a)-1; i++)
a[i] = a[i+1] * (i+1) % mod;
a.pop_back();
return a;
@@ -32,8 +32,8 @@ namespace reference {//checked against yosupo
vector<ll> poly_integr(vector<ll> a){
if(a.empty()) return {0};
- a.push_back(a.back() * powMod(sz(a), mod-2, mod) % mod);
- for(int i = sz(a)-2; i > 0; i--)
+ a.push_back(a.back() * powMod(ssize(a), mod-2, mod) % mod);
+ for(int i = ssize(a)-2; i > 0; i--)
a[i] = a[i-1] * powMod(i, mod-2, mod) % mod;
a[0] = 0;
return a;
@@ -51,7 +51,7 @@ namespace reference {//checked against yosupo
for(int len = 1; len < n; len *= 2){
vector<ll> p = poly_log(q, 2*len);
for(int i = 0; i < 2*len; i++)
- p[i] = (mod - p[i] + (i < sz(a) ? a[i] : 0)) % mod;
+ p[i] = (mod - p[i] + (i < ssize(a) ? a[i] : 0)) % mod;
vector<ll> q2 = q;
q2.resize(2*len);
ntt(p), ntt(q2);
diff --git a/test/missing.ignore b/test/missing.ignore
new file mode 100644
index 0000000..c5f97bc
--- /dev/null
+++ b/test/missing.ignore
@@ -0,0 +1,7 @@
+datastructures/pbds.cpp
+other/pragmas.cpp
+other/stuff.cpp
+other/timed.cpp
+tests/gcc5bug.cpp
+tests/precision.cpp
+tests/whitespace.cpp
diff --git a/test/other/bitOps.cpp b/test/other/bitOps.cpp
index 44f6297..2250521 100644
--- a/test/other/bitOps.cpp
+++ b/test/other/bitOps.cpp
@@ -31,9 +31,7 @@ ll naive(ll x) {
bits.push_back(x & 1);
x >>= 1;
}
- reverse(all(bits));
- next_permutation(all(bits));
- reverse(all(bits));
+ ranges::next_permutation(bits | views::reverse);
x = 0;
for (ll i = 0, j = 1; i < 64; i++, j <<= 1) {
if (bits[i] != 0) x |= j;
@@ -56,4 +54,4 @@ void test_nextPerm() {
int main() {
test_subsets();
test_nextPerm();
-} \ No newline at end of file
+}
diff --git a/test/other/josephus2.cpp b/test/other/josephus2.cpp
index 85a9d28..f2c0440 100644
--- a/test/other/josephus2.cpp
+++ b/test/other/josephus2.cpp
@@ -4,8 +4,8 @@
template<ll O>
ll naive(ll n, ll k) {
vector<ll> state(n);
- iota(all(state), O);
- for (ll i = k-1; state.size() > 1; i = (i + k - 1) % sz(state)) {
+ iota(begin(state), end(state), O);
+ for (ll i = k-1; state.size() > 1; i = (i + k - 1) % ssize(state)) {
state.erase(state.begin() + i);
}
return state[0];
@@ -15,7 +15,7 @@ void stress_test() {
ll tests = 0;
for (ll i = 1; i < 2'000; i++) {
auto got = rotateLeft(i);
- auto expected = naive<1>(i, 2);
+ auto expected = naive<0>(i, 2);
if (got != expected) cerr << "error: " << i << FAIL;
tests++;
}
diff --git a/test/other/josephusK.cpp b/test/other/josephusK.cpp
index e837640..1a5aa9d 100644
--- a/test/other/josephusK.cpp
+++ b/test/other/josephusK.cpp
@@ -5,8 +5,8 @@
template<ll O>
ll naive(ll n, ll k) {
vector<ll> state(n);
- iota(all(state), O);
- for (ll i = k-1; state.size() > 1; i = (i + k - 1) % sz(state)) {
+ iota(begin(state), end(state), O);
+ for (ll i = k-1; state.size() > 1; i = (i + k - 1) % ssize(state)) {
state.erase(state.begin() + i);
}
return state[0];
diff --git a/test/other/pbs.cpp b/test/other/pbs.cpp
index ba3b9d0..e1dfea0 100644
--- a/test/other/pbs.cpp
+++ b/test/other/pbs.cpp
@@ -49,7 +49,7 @@ void stress_test() {
for (int i=1; i<n; i++) {
edges.emplace_back(Random::integer(0, i), i);
}
- shuffle(all(edges), Random::rng);
+ ranges::shuffle(edges, Random::rng);
queries.clear();
for (int i=0; i<Q; i++) {
auto x = Random::distinct(2, n);
@@ -80,7 +80,7 @@ void performance_test() {
for (int i=1; i<n; i++) {
edges.emplace_back(Random::integer(0, i), i);
}
- shuffle(all(edges), Random::rng);
+ ranges::shuffle(edges, Random::rng);
queries.clear();
for (int i=0; i<Q; i++) {
auto x = Random::distinct(2, n);
@@ -91,7 +91,7 @@ void performance_test() {
t.start();
vector<int> ans = pbs(Q, MAX_OPERATIONS);
t.stop();
- ll hash = accumulate(all(ans), 0LL);
+ ll hash = accumulate(begin(ans), end(ans), 0LL);
if (t.time > 700) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
diff --git a/test/other/sos.cpp b/test/other/sos.cpp
deleted file mode 100644
index f3a6109..0000000
--- a/test/other/sos.cpp
+++ /dev/null
@@ -1,50 +0,0 @@
-#include "../util.h"
-
-vector<ll> sos(const vector<ll>& in) {
- #include <other/sos.cpp>
- return res;
-}
-
-vector<ll> naive(const vector<ll>& in) {
- vector<ll> res(sz(in));
- for (ll i = 0; i < sz(in); i++) {
- for (ll j = 0; j <= i; j++) {
- if ((i | j) == i) {
- res[i] += in[j];
- }
- }
- }
- return res;
-}
-
-void stress_test() {
- ll tests = 0;
- for (ll i = 0; i < 1000; i++) {
- int n = Random::integer<int>(1, 100);
- auto in = Random::integers<ll>(n, -1000, 1000);
- auto got = sos(in);
- auto expected = naive(in);
- if (got != expected) cerr << "error: " << i << FAIL;
- tests += n;
- }
- cerr << "tested random queries: " << tests << endl;
-}
-
-constexpr int N = 10'000'000;
-void performance_test() {
- timer t;
- auto in = Random::integers<ll>(N, -1000, 1000);
- t.start();
- auto res = sos(in);
- t.stop();
- hash_t hash = 0;
- for (ll x : res) hash += x;
- if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
- cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
-}
-
-int main() {
- stress_test();
- performance_test();
-}
-
diff --git a/test/string/deBruijn.cpp b/test/string/deBruijn.cpp
index 6b3fea4..eb82b59 100644
--- a/test/string/deBruijn.cpp
+++ b/test/string/deBruijn.cpp
@@ -5,13 +5,13 @@
bool isDeBruijn(string s, int n, int k) {
ll expected = 1;
for (ll i = 0; i < n; i++) expected *= k;
- if (expected != sz(s)) return false;
+ if (expected != ssize(s)) return false;
s += s;
set<string_view> seen;
- for (ll i = 0; 2*i < sz(s); i++) {
+ for (ll i = 0; 2*i < ssize(s); i++) {
seen.insert(string_view(s).substr(i, n));
}
- return sz(seen) == expected;
+ return ssize(seen) == expected;
}
void stress_test() {
@@ -21,7 +21,7 @@ void stress_test() {
auto [l, r] = Random::pair<char>('b', 'f');
auto got = deBruijn(n, l, r);
if (!isDeBruijn(got, n, r - l + 1)) cerr << "error" << FAIL;
- queries += sz(got);
+ queries += ssize(got);
}
cerr << "tested random queries: " << queries << endl;
}
@@ -32,7 +32,7 @@ void performance_test() {
t.start();
auto res = deBruijn(N, '0', '1');
t.stop();
- hash_t hash = sz(res);
+ hash_t hash = ssize(res);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/string/duval.cpp b/test/string/duval.cpp
index 58b4a44..5ebc96c 100644
--- a/test/string/duval.cpp
+++ b/test/string/duval.cpp
@@ -6,8 +6,8 @@ constexpr int N = 20'000'000;
bool isLyndon(string_view s) {
string t = string(s) + string(s);
- for (ll i = 1; i < sz(s); i++) {
- if (s >= t.substr(i, sz(s))) return false;
+ for (ll i = 1; i < ssize(s); i++) {
+ if (s >= t.substr(i, ssize(s))) return false;
}
return !s.empty();
}
@@ -21,11 +21,11 @@ void stress_test_duval() {
if (got.empty()) cerr << "error: a" << FAIL;
if (got.front().first != 0) cerr << "error: b" << FAIL;
if (got.back().second != n) cerr << "error: c" << FAIL;
- for (int j = 1; j < sz(got); j++) {
- if (got[j - 1].second != got[j].first) cerr << "error: d" << FAIL;
+ for (int j = 1; j < ssize(got); j++) {
+ if (got[j - 1].second != got[j].first) cerr << "error: d" << FAIL;
}
for (auto [l, r] : got) {
- if (!isLyndon(string_view(s).substr(l, r-l))) cerr << "error: e" << FAIL;
+ if (!isLyndon(string_view(s).substr(l, r-l))) cerr << "error: e" << FAIL;
}
queries += n;
}
@@ -45,7 +45,7 @@ void performance_test_duval() {
}
int naive(string s) {
- ll n = sz(s);
+ ll n = ssize(s);
s += s;
int res = 0;
for (int i = 0; i < n; i++) {
diff --git a/test/string/kmp.cpp b/test/string/kmp.cpp
index 9c9c924..2364efd 100644
--- a/test/string/kmp.cpp
+++ b/test/string/kmp.cpp
@@ -2,8 +2,8 @@
#include <string/kmp.cpp>
vector<int> naive(string_view s) {
- vector<int> res(sz(s) + 1, -1);
- for (int i = 0; i < sz(s); i++) {
+ vector<int> res(ssize(s) + 1, -1);
+ for (int i = 0; i < ssize(s); i++) {
for (int j = 0; j <= i; j++)
if (s.substr(0, j) == s.substr(i-j+1, j))
res[i+1] = j;
diff --git a/test/string/longestCommonSubsequence.cpp b/test/string/longestCommonSubsequence.cpp
index 6d7a6c5..128c3c1 100644
--- a/test/string/longestCommonSubsequence.cpp
+++ b/test/string/longestCommonSubsequence.cpp
@@ -4,19 +4,19 @@
bool isSubstr(string_view s, string_view sub) {
int i = 0;
for (char c : s) {
- if (i < sz(sub) && c == sub[i]) i++;
+ if (i < ssize(sub) && c == sub[i]) i++;
}
- return i >= sz(sub);
+ return i >= ssize(sub);
}
string naive(string_view s, string_view t) {
string res = "";
- for (ll i = 1; i < (1ll << sz(s)); i++) {
+ for (ll i = 1; i < (1ll << ssize(s)); i++) {
string tmp;
- for (ll j = 0; j < sz(s); j++) {
+ for (ll j = 0; j < ssize(s); j++) {
if (((i >> j) & 1) != 0) tmp.push_back(s[j]);
}
- if (sz(tmp) >= sz(res) && isSubstr(t, tmp)) res = tmp;
+ if (ssize(tmp) >= ssize(res) && isSubstr(t, tmp)) res = tmp;
}
return res;
}
@@ -44,7 +44,7 @@ void performance_test() {
t.start();
auto res = lcss(a, b);
t.stop();
- hash_t hash = sz(res);
+ hash_t hash = ssize(res);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/string/lyndon.cpp b/test/string/lyndon.cpp
index ecf2dad..6710973 100644
--- a/test/string/lyndon.cpp
+++ b/test/string/lyndon.cpp
@@ -3,8 +3,8 @@
bool isLyndon(string_view s) {
string t = string(s) + string(s);
- for (ll i = 1; i < sz(s); i++) {
- if (s >= t.substr(i, sz(s))) return false;
+ for (ll i = 1; i < ssize(s); i++) {
+ if (s >= t.substr(i, ssize(s))) return false;
}
return !s.empty();
}
@@ -12,8 +12,8 @@ bool isLyndon(string_view s) {
vector<string> naive(ll n, char mi, char ma) {
vector<string> res;
auto dfs = [&](auto&& self, string pref)->void{
- if (sz(pref) <= n && isLyndon(pref)) res.push_back(pref);
- if (sz(pref) >= n) return;
+ if (ssize(pref) <= n && isLyndon(pref)) res.push_back(pref);
+ if (ssize(pref) >= n) return;
for (char c = mi; c <= ma; c++) {
self(self, pref + c);
}
@@ -39,7 +39,7 @@ void stress_test() {
auto got = fast(n, l, r);
auto expected = naive(n, l, r);
if (got != expected) cerr << "error" << FAIL;
- queries += sz(expected);
+ queries += ssize(expected);
}
cerr << "tested random queries: " << queries << endl;
}
@@ -50,7 +50,7 @@ void performance_test() {
t.start();
auto res = fast(N, 'a', 'f');
t.stop();
- hash_t hash = sz(res);
+ hash_t hash = ssize(res);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/string/manacher.cpp b/test/string/manacher.cpp
index 503d181..803154b 100644
--- a/test/string/manacher.cpp
+++ b/test/string/manacher.cpp
@@ -2,16 +2,16 @@
#include <string/manacher.cpp>
vector<int> naive(string_view s) {
- vector<int> res(2 * sz(s) + 1);
- for (int i = 0; i < sz(s); i++) { //odd palindromes
+ vector<int> res(2 * ssize(s) + 1);
+ for (int i = 0; i < ssize(s); i++) { //odd palindromes
int j = 2*i+1;
- while (i+res[j] < sz(s) && i-res[j] >= 0 && s[i-res[j]] == s[i+res[j]]) res[j]++;
+ while (i+res[j] < ssize(s) && i-res[j] >= 0 && s[i-res[j]] == s[i+res[j]]) res[j]++;
res[j]*=2;
res[j]--;
}
- for (int i = 0; i <= sz(s); i++) { //even palindromes
+ for (int i = 0; i <= ssize(s); i++) { //even palindromes
int j = 2*i;
- while (i+res[j] < sz(s) && i-res[j]-1 >= 0 && s[i-res[j]-1] == s[i+res[j]]) res[j]++;
+ while (i+res[j] < ssize(s) && i-res[j]-1 >= 0 && s[i-res[j]-1] == s[i+res[j]]) res[j]++;
res[j] *= 2;
}
return res;
diff --git a/test/string/rollingHash.cpp b/test/string/rollingHash.cpp
index 0491bc0..a9dace5 100644
--- a/test/string/rollingHash.cpp
+++ b/test/string/rollingHash.cpp
@@ -3,7 +3,7 @@
string thueMorse(ll n) {
string res = "a";
- while (sz(res) < n) {
+ while (ssize(res) < n) {
string tmp = res;
for (char& c : tmp) c ^= 1;
res += tmp;
@@ -12,7 +12,7 @@ string thueMorse(ll n) {
}
auto getHash(const string& s) {
- return Hash(s)(0, sz(s));
+ return Hash(s)(0, ssize(s));
}
void testThueMorse() {
@@ -20,13 +20,13 @@ void testThueMorse() {
set<string> expected;
string s = thueMorse(1000);
Hash h(s);
- for (int l = 0; l < sz(s); l++) {
- for (int r = l + 1; r <= sz(s); r++) {
+ for (int l = 0; l < ssize(s); l++) {
+ for (int r = l + 1; r <= ssize(s); r++) {
got.insert(h(l, r));
expected.insert(s.substr(l, r - l));
}
}
- if (sz(got) != sz(expected)) cerr << "error: thueMorse" << FAIL;
+ if (ssize(got) != ssize(expected)) cerr << "error: thueMorse" << FAIL;
cerr << "thueMorse: ok" << endl;
}
@@ -43,13 +43,13 @@ void testSmall() {
auto dfs = [&](auto&& self, string pref)->void {
expected++;
got.insert(getHash(pref));
- if(sz(pref) >= 5) return;
+ if(ssize(pref) >= 5) return;
for (char c = 'a'; c <= 'z'; c++) {
self(self, pref + c);
}
};
dfs(dfs, "");
- if (sz(got) != expected) cerr << "error: small" << FAIL;
+ if (ssize(got) != expected) cerr << "error: small" << FAIL;
cerr << "small: ok" << endl;
}
@@ -58,13 +58,13 @@ void stress_test() {
set<string> expected;
string s = Random::string(1000, "abc");
Hash h(s);
- for (int l = 0; l < sz(s); l++) {
- for (int r = l + 1; r <= sz(s); r++) {
+ for (int l = 0; l < ssize(s); l++) {
+ for (int r = l + 1; r <= ssize(s); r++) {
got.insert(h(l, r));
expected.insert(s.substr(l, r - l));
}
}
- if (sz(got) != sz(expected)) cerr << "error: stress test" << FAIL;
+ if (ssize(got) != ssize(expected)) cerr << "error: stress test" << FAIL;
cerr << "stress test: ok" << endl;
}
diff --git a/test/string/rollingHashCf.cpp b/test/string/rollingHashCf.cpp
index 79003de..f7ce357 100644
--- a/test/string/rollingHashCf.cpp
+++ b/test/string/rollingHashCf.cpp
@@ -5,7 +5,7 @@ constexpr ll RandomQ = 318LL << 53;
string thueMorse(ll n) {
string res = "a";
- while (sz(res) < n) {
+ while (ssize(res) < n) {
string tmp = res;
for (char& c : tmp) c ^= 1;
res += tmp;
@@ -14,7 +14,7 @@ string thueMorse(ll n) {
}
auto getHash(const string& s) {
- return Hash(s, RandomQ)(0, sz(s));
+ return Hash(s, RandomQ)(0, ssize(s));
}
void testThueMorse() {
@@ -22,13 +22,13 @@ void testThueMorse() {
set<string> expected;
string s = thueMorse(1000);
Hash h(s, RandomQ);
- for (int l = 0; l < sz(s); l++) {
- for (int r = l + 1; r <= sz(s); r++) {
+ for (int l = 0; l < ssize(s); l++) {
+ for (int r = l + 1; r <= ssize(s); r++) {
got.insert(h(l, r));
expected.insert(s.substr(l, r - l));
}
}
- if (sz(got) != sz(expected)) cerr << "error: thueMorse" << FAIL;
+ if (ssize(got) != ssize(expected)) cerr << "error: thueMorse" << FAIL;
cerr << "thueMorse: ok" << endl;
}
@@ -45,13 +45,13 @@ void testSmall() {
auto dfs = [&](auto&& self, string pref)->void {
expected++;
got.insert(getHash(pref));
- if(sz(pref) >= 5) return;
+ if(ssize(pref) >= 5) return;
for (char c = 'a'; c <= 'z'; c++) {
self(self, pref + c);
}
};
dfs(dfs, "");
- if (sz(got) != expected) cerr << "error: small" << FAIL;
+ if (ssize(got) != expected) cerr << "error: small" << FAIL;
cerr << "small: ok" << endl;
}
@@ -60,13 +60,13 @@ void stress_test() {
set<string> expected;
string s = Random::string(1000, "abc");
Hash h(s, RandomQ);
- for (int l = 0; l < sz(s); l++) {
- for (int r = l + 1; r <= sz(s); r++) {
+ for (int l = 0; l < ssize(s); l++) {
+ for (int r = l + 1; r <= ssize(s); r++) {
got.insert(h(l, r));
expected.insert(s.substr(l, r - l));
}
}
- if (sz(got) != sz(expected)) cerr << "error: stress test" << FAIL;
+ if (ssize(got) != ssize(expected)) cerr << "error: stress test" << FAIL;
cerr << "stress test: ok" << endl;
}
diff --git a/test/string/suffixArray.cpp b/test/string/suffixArray.cpp
index 4945d8e..a1db190 100644
--- a/test/string/suffixArray.cpp
+++ b/test/string/suffixArray.cpp
@@ -2,9 +2,9 @@
#include <string/suffixArray.cpp>
vector<int> naive(string_view s) {
- vector<int> SA(sz(s));
- iota(all(SA), 0);
- sort(all(SA), [s](int a, int b){
+ vector<int> SA(ssize(s));
+ iota(begin(SA), end(SA), 0);
+ ranges::sort(SA, [s](int a, int b){
return s.substr(a) < s.substr(b);
});
return SA;
@@ -12,7 +12,7 @@ vector<int> naive(string_view s) {
int lcp(string_view s, int x, int y) {
int res = 0;
- while (x + res < sz(s) && y + res < sz(s) && s[x + res] == s[y + res]) res++;
+ while (x + res < ssize(s) && y + res < ssize(s) && s[x + res] == s[y + res]) res++;
return res;
}
@@ -50,7 +50,7 @@ void performance_test() {
SuffixArray sa(s);
t.stop();
hash_t hash = 0;
- for (int i = 0; i < sz(sa.SA); i++) hash += i*sa.SA[i];
+ for (int i = 0; i < ssize(sa.SA); i++) hash += i*sa.SA[i];
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/string/suffixAutomaton.cpp b/test/string/suffixAutomaton.cpp
index c2ff511..cab555e 100644
--- a/test/string/suffixAutomaton.cpp
+++ b/test/string/suffixAutomaton.cpp
@@ -4,10 +4,10 @@
pair<int, int> naive(string_view s, string_view t) {
int pos = 0;
int len = 0;
- for (int j = 0; j < sz(t); j++) {
- for (int i = 0; i < sz(s); i++) {
+ for (int j = 0; j < ssize(t); j++) {
+ for (int i = 0; i < ssize(s); i++) {
int cur = 0;
- while (i+cur < sz(s) && j+cur < sz(t) && s[i+cur] == t[j+cur]) cur++;
+ while (i+cur < ssize(s) && j+cur < ssize(t) && s[i+cur] == t[j+cur]) cur++;
if (cur > len) {
pos = j;
len = cur;
@@ -43,7 +43,7 @@ void performance_test() {
SuffixAutomaton sa(s);
t.stop();
hash_t hash = 0;
- for (ll c = 0; c < sz(s);) {
+ for (ll c = 0; c < ssize(s);) {
int m = Random::integer<int>(1, 1000);
s = Random::string(m, "abc");
t.start();
diff --git a/test/string/suffixTree.cpp b/test/string/suffixTree.cpp
index c0d79e4..69c24fe 100644
--- a/test/string/suffixTree.cpp
+++ b/test/string/suffixTree.cpp
@@ -2,8 +2,8 @@
#include <string/suffixTree.cpp>
vector<string> naive(string_view s) {
- vector<string> res(sz(s));
- for (ll i = 0; i < sz(s); i++) {
+ vector<string> res(ssize(s));
+ for (ll i = 0; i < ssize(s); i++) {
res[i] = s.substr(i);
}
return res;
@@ -19,7 +19,7 @@ void stress_test() {
auto dfs = [&](auto&& self, string pref, ll node) -> void {
auto& [l, r, _, next] = st.tree[node];
if (l >= 0) pref += s.substr(l, r - l);
- if (pref.back() == '#') got[n + 1 - sz(pref)] = pref;
+ if (pref.back() == '#') got[n + 1 - ssize(pref)] = pref;
for (auto [__, j] : next) {
self(self, pref, j);
}
@@ -39,7 +39,7 @@ void performance_test() {
t.start();
SuffixTree st(s);
t.stop();
- hash_t hash = sz(st.tree);
+ hash_t hash = ssize(st.tree);
if (t.time > 500) cerr << "too slow: " << t.time << FAIL;
cerr << "tested performance: " << t.time << "ms (hash: " << hash << ")" << endl;
}
diff --git a/test/string/z.cpp b/test/string/z.cpp
index f890a3e..3c76939 100644
--- a/test/string/z.cpp
+++ b/test/string/z.cpp
@@ -2,9 +2,9 @@
#include <string/z.cpp>
vector<int> naive(const string& s) {
- vector<int> res(sz(s));
- for (int i = 1; i < sz(s); i++) {
- while (i + res[i] < sz(s) && s[res[i]] == s[i + res[i]]) res[i]++;
+ vector<int> res(ssize(s));
+ for (int i = 1; i < ssize(s); i++) {
+ while (i + res[i] < ssize(s) && s[res[i]] == s[i + res[i]]) res[i]++;
}
return res;
}
diff --git a/test/test.sh b/test/test.sh
deleted file mode 100755
index 6609f1a..0000000
--- a/test/test.sh
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/bin/bash
-set -e
-cd "$(dirname "$0")"
-ulimit -s 4000000
-export MALLOC_PERTURB_="$((2#01011001))"
-shopt -s lastpipe
-
-declare -A cppstandard
-cppstandard["string/suffixArray.cpp"]="gnu++20"
-cppstandard["other/pbs.cpp"]="gnu++20"
-seedmacro=""
-
-process_awk() {
- awk_file=$(realpath --relative-to="${PWD}" "${1}")
- cpp_file=${awk_file%.awk}
- folder=$(dirname $awk_file)
- #echo "$awk_file"
- mkdir -p "./awk/$folder"
- awk -f "$awk_file" < "../content/$cpp_file" > "./awk/$cpp_file"
-}
-
-test_file() {
- file=$(realpath --relative-to="${PWD}" "${1}")
- echo "$file:"
- echo "compiling..."
- std="gnu++17"
- if [[ -v cppstandard[$file] ]]; then
- std=${cppstandard[$file]}
- fi
- g++ -std=$std "$file" -I ./awk/ -I ../content/ -O2 -Wall -Wextra -Wshadow -Werror $seedmacro
- echo "running..."
- timeout --foreground 60s ./a.out
- echo ""
- rm ./a.out
-}
-
-list_missing() {
- declare -A ignore
- ignore["other/bitOps.cpp"]=1
- ignore["other/pragmas.cpp"]=1
- ignore["other/stuff.cpp"]=1
- ignore["other/timed.cpp"]=1
- ignore["tests/gcc5bug.cpp"]=1
- ignore["tests/precision.cpp"]=1
- ignore["tests/whitespace.cpp"]=1
-
- total=0
- missing=0
-
- if [[ ! -v $1 ]]; then
- echo "missing tests:"
- fi
- find ../content/ -type f -name '*.cpp' -print0 | sort -z | while read -d $'\0' file
- do
- total=$((total+1))
- file=${file#../content/}
- if [ ! -f "$file" ] && [[ ! -v ignore["$file"] ]]; then
- missing=$((missing+1))
- if [[ ! -v $1 ]]; then
- echo " $file"
- fi
- fi
- done
- if [[ -v $1 ]]; then
- covered=$((total-missing))
- coverage=$((100*covered/total))
- echo "REQUIRED=$(( total < 4 ? 0 : total - 4 ))"
- echo "TOTAL=$total"
- echo "COVERED=$covered"
- echo "MISSING=$missing"
- fi
-}
-
-coverage() {
- list_missing 1
-}
-
-rm -rf ./awk/
-find . -type f -path '*.awk' -print0 | sort -z | while read -d $'\0' file
-do
- process_awk "$file"
-done
-
-if [ "$#" -ne 0 ]; then
- for arg in "$@"
- do
- if [[ $arg == "--awk" ]]; then
- echo "processed all awk files"
- elif [[ $arg == "--missing" ]]; then
- list_missing
- elif [[ $arg == "--coverage" ]]; then
- coverage
- elif [[ $arg == --seed=* ]]; then
- seedmacro="-DSEED=${arg:7}ll"
- elif [ -d "$arg" ]; then
- dir=$(realpath --relative-to="${PWD}" "$arg")
- find . -type f -path "./${dir}/*.cpp" -not -path './awk/*' -print0 | sort -z | while read -d $'\0' file
- do
- test_file "$file"
- done
- elif [ -f "$arg" ]; then
- test_file "$arg"
- else
- echo "did not recognize: $arg"
- fi
- done
-else
- find . -type f -path '*.cpp' -not -path './awk/*' -print0 | sort -z | while read -d $'\0' file
- do
- test_file "$file"
- done
- list_missing
-fi
-
diff --git a/test/util.h b/test/util.h
index 6f23b82..e0d9b57 100644
--- a/test/util.h
+++ b/test/util.h
@@ -1,9 +1,6 @@
#include <bits/stdc++.h>
using namespace std;
-#define all(x) std::begin(x), std::end(x)
-#define sz(x) (ll)std::size(x)
-
using ll = long long;
using lll = __int128;
using ld = long double;
@@ -13,6 +10,14 @@ namespace INT {constexpr int INF = 0x3FFF'FFFF;}
namespace LL {constexpr ll INF = 0x3FFF'FFFF'FFFF'FFFFll;}
namespace LD {constexpr ld INF = numeric_limits<ld>::infinity();}
+template<typename T>
+T _lg_check(T n) {
+ assert(n > 0);
+ return __lg(n);
+}
+
+#define __lg _lg_check
+
namespace details {
template<typename T = ll>
bool isPrime(T x) {
@@ -109,7 +114,7 @@ namespace Random {
std::string string(std::size_t n, string_view chars) {
std::string res(n, '*');
- for (char& c : res) c = chars[integer(sz(chars))];
+ for (char& c : res) c = chars[integer(ssize(chars))];
return res;
}
@@ -168,6 +173,30 @@ namespace Random {
exit(1);
}
+namespace detail {
+ double benchmark() {
+ mt19937 rng(734820734);
+ vector<unsigned> a(10000000);
+ for (unsigned &x: a) x = rng();
+ chrono::steady_clock::time_point start = chrono::steady_clock::now();
+ vector<unsigned> dp(ssize(a)+1, numeric_limits<unsigned>::max());
+ int res = 0;
+ for (unsigned x: a) {
+ auto it = ranges::upper_bound(dp, x);
+ res = max(res, (int)(it - begin(dp)));
+ *it = x;
+ }
+ chrono::steady_clock::time_point end = chrono::steady_clock::now();
+ assert(res == 6301);
+ double t =
+ chrono::duration_cast<chrono::duration<double, milli>>(end - start)
+ .count();
+ return 30/t;
+ }
+
+ double speed = benchmark();
+}
+
struct timer {
bool running = false;
double time = 0;
@@ -183,7 +212,7 @@ struct timer {
auto end = chrono::steady_clock::now();
if (!running) cerr << "timer not running!" << FAIL;
running = false;
- time += chrono::duration_cast<chrono::duration<double, milli>>(end - begin).count();
+ time += chrono::duration_cast<chrono::duration<double, milli>>(end - begin).count() * detail::speed;
}
void reset() {
@@ -208,7 +237,7 @@ namespace c20 {
return {{a[I]...}};
}
}
-
+
template<class T, std::size_t N>
constexpr std::array<std::remove_cv_t<T>, N> to_array(T (&a)[N]) {
return c20::detail::to_array_impl(a, std::make_index_sequence<N>{});
@@ -257,9 +286,9 @@ public:
Graph(int n) : adj(n) {}
- int m() const {return sz(edges);}
- int n() const {return sz(adj);}
- int deg(int x) const {return sz(adj[x]);}
+ int m() const {return ssize(edges);}
+ int n() const {return ssize(adj);}
+ int deg(int x) const {return ssize(adj[x]);}
Graph& clear() {
adj.assign(adj.size(), {});
@@ -271,33 +300,33 @@ public:
if (!LOOPS && from == to) return false;
if (!MULTI && adj[from].find(to) != adj[from].end()) return false;
edges.emplace_back(from, to, w);
- _addAdj(sz(edges) - 1);
+ _addAdj(ssize(edges) - 1);
return true;
}
Graph& reverse() {
for (auto& e : edges) swap(e.from, e.to);
adj.assign(adj.size(), {});
- for (int i = 0; i < sz(edges); i++) _addAdj(i);
+ for (int i = 0; i < ssize(edges); i++) _addAdj(i);
return *this;
}
Graph& shuffle() {
- std::shuffle(all(edges), Random::rng);
+ ranges::shuffle(edges, Random::rng);
if constexpr (!DIR) {
for (auto& e : edges) {
if (Random::integer(0, 2)) swap(e.from, e.to);
}
}
adj.assign(adj.size(), {});
- for (int i = 0; i < sz(edges); i++) _addAdj(i);
+ for (int i = 0; i < ssize(edges); i++) _addAdj(i);
return *this;
}
Graph& permutate() {
vector<int> perm(n());
- iota(all(perm), 0);
- std::shuffle(all(perm), Random::rng);
+ iota(begin(perm), end(perm), 0);
+ ranges::shuffle(perm, Random::rng);
for (auto& e : edges) {
e.from = perm[e.from];
e.to = perm[e.to];
@@ -375,7 +404,7 @@ public:
}
}
}
- std::shuffle(all(tmp), Random::rng);
+ ranges::shuffle(tmp, Random::rng);
for (auto [a, b] : tmp) {
if (todo <= 0) break;
if (addEdge(a, b)) todo--;
@@ -413,3 +442,10 @@ ld float_error(ld given, ld expected) {
}
return numeric_limits<ld>::infinity();
}
+
+#include <ext/pb_ds/assoc_container.hpp>
+template<typename T>
+using Tree = __gnu_pbds::tree<T, __gnu_pbds::null_type, less<T>,
+ __gnu_pbds::rb_tree_tag,
+ __gnu_pbds::tree_order_statistics_node_update>;
+