From 6422747263bcea3b0d60af59adaf42c58b77ceac Mon Sep 17 00:00:00 2001
From: yangpan <892916811@qq.com>
Date: Thu, 4 Jan 2024 23:47:49 +0800
Subject: [PATCH 1/6] Add the rust-ini library to vendor
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .gitignore | 1 -
 src/sysboostd/Cargo.toml | 1 +
 .../const-random-macro/.cargo-checksum.json | 1 +
 vendor/const-random-macro/Cargo.toml | 43 +
 vendor/const-random-macro/LICENSE-APACHE | 201 +
 vendor/const-random-macro/LICENSE-MIT | 25 +
 vendor/const-random-macro/src/lib.rs | 57 +
 vendor/const-random-macro/src/span.rs | 104 +
 vendor/const-random/.cargo-checksum.json | 1 +
 vendor/const-random/Cargo.toml | 29 +
 vendor/const-random/LICENSE-APACHE | 201 +
 vendor/const-random/LICENSE-MIT | 25 +
 vendor/const-random/README.md | 24 +
 vendor/const-random/src/lib.rs | 13 +
 vendor/const-random/tests/tests.rs | 57 +
 vendor/crunchy/.cargo-checksum.json | 1 +
 vendor/crunchy/Cargo.toml | 31 +
 vendor/crunchy/README.md | 38 +
 vendor/crunchy/build.rs | 253 +
 vendor/crunchy/src/lib.rs | 36 +
 vendor/dlv-list/.cargo-checksum.json | 1 +
 vendor/dlv-list/CHANGELOG.md | 119 +
 vendor/dlv-list/CONTRIBUTING.md | 74 +
 vendor/dlv-list/Cargo.toml | 44 +
 vendor/dlv-list/LICENSE-APACHE | 176 +
 vendor/dlv-list/LICENSE-MIT | 21 +
 vendor/dlv-list/README.md | 31 +
 vendor/dlv-list/codecov.yml | 5 +
 vendor/dlv-list/rustfmt.toml | 2 +
 vendor/dlv-list/src/lib.rs | 3546 +++++++++++
 vendor/dlv-list/src/serde.rs | 113 +
 vendor/getrandom/.cargo-checksum.json | 1 +
 vendor/getrandom/CHANGELOG.md | 435 ++
 vendor/getrandom/Cargo.toml | 94 +
 vendor/getrandom/LICENSE-APACHE | 201 +
 vendor/getrandom/LICENSE-MIT | 26 +
 vendor/getrandom/README.md | 64 +
 vendor/getrandom/SECURITY.md | 13 +
 vendor/getrandom/benches/buffer.rs | 71 +
 vendor/getrandom/src/3ds.rs | 18 +
 vendor/getrandom/src/apple-other.rs | 27 +
 vendor/getrandom/src/bsd_arandom.rs | 54 +
 vendor/getrandom/src/custom.rs | 114 +
 vendor/getrandom/src/dragonfly.rs | 30 +
 vendor/getrandom/src/emscripten.rs | 13 +
 vendor/getrandom/src/error.rs | 191 +
 vendor/getrandom/src/error_impls.rs | 24 +
 vendor/getrandom/src/espidf.rs | 26 +
 vendor/getrandom/src/fuchsia.rs | 21 +
 vendor/getrandom/src/hermit.rs | 21 +
 vendor/getrandom/src/hurd.rs | 18 +
 vendor/getrandom/src/js.rs | 161 +
 vendor/getrandom/src/lib.rs | 351 ++
 vendor/getrandom/src/linux_android.rs | 48 +
 vendor/getrandom/src/macos.rs | 36 +
 vendor/getrandom/src/openbsd.rs | 22 +
 vendor/getrandom/src/rdrand.rs | 130 +
 vendor/getrandom/src/solaris_illumos.rs | 49 +
 vendor/getrandom/src/solid.rs | 26 +
 vendor/getrandom/src/use_file.rs | 138 +
 vendor/getrandom/src/util.rs | 101 +
 vendor/getrandom/src/util_libc.rs | 159 +
 vendor/getrandom/src/vita.rs | 21 +
 vendor/getrandom/src/vxworks.rs | 37 +
 vendor/getrandom/src/wasi.rs | 25 +
 vendor/getrandom/src/windows.rs | 66 +
 vendor/getrandom/tests/common/mod.rs | 100 +
 vendor/getrandom/tests/custom.rs | 54 +
 vendor/getrandom/tests/normal.rs | 11 +
 vendor/getrandom/tests/rdrand.rs | 20 +
 vendor/ordered-multimap/.cargo-checksum.json | 1 +
 vendor/ordered-multimap/CHANGELOG.md | 110 +
 vendor/ordered-multimap/CONTRIBUTING.md | 74 +
 vendor/ordered-multimap/Cargo.toml | 44 +
 vendor/ordered-multimap/LICENSE | 21 +
 vendor/ordered-multimap/README.md | 38 +
 vendor/ordered-multimap/codecov.yml | 5 +
 vendor/ordered-multimap/rustfmt.toml | 2 +
 vendor/ordered-multimap/src/lib.rs | 16 +
 .../src/list_ordered_multimap.rs | 5383 +++++++++++++++++
 vendor/ordered-multimap/src/serde.rs
| 135 + vendor/rust-ini/.cargo-checksum.json | 1 + vendor/rust-ini/Cargo.toml | 46 + vendor/rust-ini/LICENSE | 9 + vendor/rust-ini/README.rst | 114 + vendor/rust-ini/examples/test.rs | 44 + vendor/rust-ini/rustfmt.toml | 12 + vendor/rust-ini/src/lib.rs | 2407 ++++++++ vendor/tiny-keccak/.cargo-checksum.json | 1 + vendor/tiny-keccak/Cargo.toml | 93 + vendor/tiny-keccak/LICENSE | 121 + vendor/tiny-keccak/README.md | 70 + vendor/tiny-keccak/benches/kangaroo.rs | 19 + vendor/tiny-keccak/benches/keccak.rs | 43 + vendor/tiny-keccak/build.rs | 22 + vendor/tiny-keccak/examples/sha3.rs | 17 + vendor/tiny-keccak/src/cshake.rs | 77 + vendor/tiny-keccak/src/k12.rs | 160 + vendor/tiny-keccak/src/keccak.rs | 93 + vendor/tiny-keccak/src/keccakf.rs | 40 + vendor/tiny-keccak/src/keccakp.rs | 28 + vendor/tiny-keccak/src/kmac.rs | 114 + vendor/tiny-keccak/src/lib.rs | 501 ++ vendor/tiny-keccak/src/parallel_hash.rs | 206 + vendor/tiny-keccak/src/sha3.rs | 83 + vendor/tiny-keccak/src/shake.rs | 56 + vendor/tiny-keccak/src/tuple_hash.rs | 106 + vendor/tiny-keccak/tests/cshake.rs | 116 + vendor/tiny-keccak/tests/kangaroo.rs | 86 + vendor/tiny-keccak/tests/keccak.rs | 30 + vendor/tiny-keccak/tests/kmac.rs | 333 + vendor/tiny-keccak/tests/parallel_hash.rs | 123 + vendor/tiny-keccak/tests/sha3.rs | 94 + vendor/tiny-keccak/tests/shake.rs | 37 + vendor/tiny-keccak/tests/tuple_hash.rs | 113 + 115 files changed, 19534 insertions(+), 1 deletion(-) create mode 100644 vendor/const-random-macro/.cargo-checksum.json create mode 100644 vendor/const-random-macro/Cargo.toml create mode 100644 vendor/const-random-macro/LICENSE-APACHE create mode 100644 vendor/const-random-macro/LICENSE-MIT create mode 100644 vendor/const-random-macro/src/lib.rs create mode 100644 vendor/const-random-macro/src/span.rs create mode 100644 vendor/const-random/.cargo-checksum.json create mode 100644 vendor/const-random/Cargo.toml create mode 100644 vendor/const-random/LICENSE-APACHE create mode 100644 vendor/const-random/LICENSE-MIT create mode 100644 vendor/const-random/README.md create mode 100644 vendor/const-random/src/lib.rs create mode 100644 vendor/const-random/tests/tests.rs create mode 100644 vendor/crunchy/.cargo-checksum.json create mode 100644 vendor/crunchy/Cargo.toml create mode 100644 vendor/crunchy/README.md create mode 100644 vendor/crunchy/build.rs create mode 100644 vendor/crunchy/src/lib.rs create mode 100644 vendor/dlv-list/.cargo-checksum.json create mode 100644 vendor/dlv-list/CHANGELOG.md create mode 100644 vendor/dlv-list/CONTRIBUTING.md create mode 100644 vendor/dlv-list/Cargo.toml create mode 100644 vendor/dlv-list/LICENSE-APACHE create mode 100644 vendor/dlv-list/LICENSE-MIT create mode 100644 vendor/dlv-list/README.md create mode 100644 vendor/dlv-list/codecov.yml create mode 100644 vendor/dlv-list/rustfmt.toml create mode 100644 vendor/dlv-list/src/lib.rs create mode 100644 vendor/dlv-list/src/serde.rs create mode 100644 vendor/getrandom/.cargo-checksum.json create mode 100644 vendor/getrandom/CHANGELOG.md create mode 100644 vendor/getrandom/Cargo.toml create mode 100644 vendor/getrandom/LICENSE-APACHE create mode 100644 vendor/getrandom/LICENSE-MIT create mode 100644 vendor/getrandom/README.md create mode 100644 vendor/getrandom/SECURITY.md create mode 100644 vendor/getrandom/benches/buffer.rs create mode 100644 vendor/getrandom/src/3ds.rs create mode 100644 vendor/getrandom/src/apple-other.rs create mode 100644 vendor/getrandom/src/bsd_arandom.rs create mode 100644 vendor/getrandom/src/custom.rs create mode 100644 
vendor/getrandom/src/dragonfly.rs create mode 100644 vendor/getrandom/src/emscripten.rs create mode 100644 vendor/getrandom/src/error.rs create mode 100644 vendor/getrandom/src/error_impls.rs create mode 100644 vendor/getrandom/src/espidf.rs create mode 100644 vendor/getrandom/src/fuchsia.rs create mode 100644 vendor/getrandom/src/hermit.rs create mode 100644 vendor/getrandom/src/hurd.rs create mode 100644 vendor/getrandom/src/js.rs create mode 100644 vendor/getrandom/src/lib.rs create mode 100644 vendor/getrandom/src/linux_android.rs create mode 100644 vendor/getrandom/src/macos.rs create mode 100644 vendor/getrandom/src/openbsd.rs create mode 100644 vendor/getrandom/src/rdrand.rs create mode 100644 vendor/getrandom/src/solaris_illumos.rs create mode 100644 vendor/getrandom/src/solid.rs create mode 100644 vendor/getrandom/src/use_file.rs create mode 100644 vendor/getrandom/src/util.rs create mode 100644 vendor/getrandom/src/util_libc.rs create mode 100644 vendor/getrandom/src/vita.rs create mode 100644 vendor/getrandom/src/vxworks.rs create mode 100644 vendor/getrandom/src/wasi.rs create mode 100644 vendor/getrandom/src/windows.rs create mode 100644 vendor/getrandom/tests/common/mod.rs create mode 100644 vendor/getrandom/tests/custom.rs create mode 100644 vendor/getrandom/tests/normal.rs create mode 100644 vendor/getrandom/tests/rdrand.rs create mode 100644 vendor/ordered-multimap/.cargo-checksum.json create mode 100644 vendor/ordered-multimap/CHANGELOG.md create mode 100644 vendor/ordered-multimap/CONTRIBUTING.md create mode 100644 vendor/ordered-multimap/Cargo.toml create mode 100644 vendor/ordered-multimap/LICENSE create mode 100644 vendor/ordered-multimap/README.md create mode 100644 vendor/ordered-multimap/codecov.yml create mode 100644 vendor/ordered-multimap/rustfmt.toml create mode 100644 vendor/ordered-multimap/src/lib.rs create mode 100644 vendor/ordered-multimap/src/list_ordered_multimap.rs create mode 100644 vendor/ordered-multimap/src/serde.rs create mode 100644 vendor/rust-ini/.cargo-checksum.json create mode 100644 vendor/rust-ini/Cargo.toml create mode 100644 vendor/rust-ini/LICENSE create mode 100644 vendor/rust-ini/README.rst create mode 100644 vendor/rust-ini/examples/test.rs create mode 100644 vendor/rust-ini/rustfmt.toml create mode 100644 vendor/rust-ini/src/lib.rs create mode 100644 vendor/tiny-keccak/.cargo-checksum.json create mode 100644 vendor/tiny-keccak/Cargo.toml create mode 100644 vendor/tiny-keccak/LICENSE create mode 100644 vendor/tiny-keccak/README.md create mode 100644 vendor/tiny-keccak/benches/kangaroo.rs create mode 100644 vendor/tiny-keccak/benches/keccak.rs create mode 100644 vendor/tiny-keccak/build.rs create mode 100644 vendor/tiny-keccak/examples/sha3.rs create mode 100644 vendor/tiny-keccak/src/cshake.rs create mode 100644 vendor/tiny-keccak/src/k12.rs create mode 100644 vendor/tiny-keccak/src/keccak.rs create mode 100644 vendor/tiny-keccak/src/keccakf.rs create mode 100644 vendor/tiny-keccak/src/keccakp.rs create mode 100644 vendor/tiny-keccak/src/kmac.rs create mode 100644 vendor/tiny-keccak/src/lib.rs create mode 100644 vendor/tiny-keccak/src/parallel_hash.rs create mode 100644 vendor/tiny-keccak/src/sha3.rs create mode 100644 vendor/tiny-keccak/src/shake.rs create mode 100644 vendor/tiny-keccak/src/tuple_hash.rs create mode 100644 vendor/tiny-keccak/tests/cshake.rs create mode 100644 vendor/tiny-keccak/tests/kangaroo.rs create mode 100644 vendor/tiny-keccak/tests/keccak.rs create mode 100644 vendor/tiny-keccak/tests/kmac.rs create mode 
100644 vendor/tiny-keccak/tests/parallel_hash.rs create mode 100644 vendor/tiny-keccak/tests/sha3.rs create mode 100644 vendor/tiny-keccak/tests/shake.rs create mode 100644 vendor/tiny-keccak/tests/tuple_hash.rs diff --git a/.gitignore b/.gitignore index 0cf51af..ce20f3e 100644 --- a/.gitignore +++ b/.gitignore @@ -5,7 +5,6 @@ target *.pyc ~* .idea/ -vendor Cargo.lock *.rto diff --git a/src/sysboostd/Cargo.toml b/src/sysboostd/Cargo.toml index c6d43da..0bbb790 100644 --- a/src/sysboostd/Cargo.toml +++ b/src/sysboostd/Cargo.toml @@ -22,6 +22,7 @@ toml = "0.5.9" inotify = "0.9" log = "0.4" goblin = "0.7" +rust-ini = "0.19" [dev-dependencies.tempfile] version = "3.2.0" diff --git a/vendor/const-random-macro/.cargo-checksum.json b/vendor/const-random-macro/.cargo-checksum.json new file mode 100644 index 0000000..4d851d8 --- /dev/null +++ b/vendor/const-random-macro/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"a56e17787441fdf7de892b488d64d31447276599b67baa7e035cc913559ce527","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","src/lib.rs":"88a3d7f284a8c4c796be7b696c07201958a3732b5d4cf223c2a0f098ee28b3b3","src/span.rs":"2013884f7a0d0ed4ea0d9be787b35e5a0fee93db3a8f45e9c59ef0ba28cd7da7"},"package":"f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"} \ No newline at end of file diff --git a/vendor/const-random-macro/Cargo.toml b/vendor/const-random-macro/Cargo.toml new file mode 100644 index 0000000..46ae1f5 --- /dev/null +++ b/vendor/const-random-macro/Cargo.toml @@ -0,0 +1,43 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "const-random-macro" +version = "0.1.16" +authors = ["Tom Kaitchuck "] +description = "Provides the procedural macro used by const-random" +documentation = "https://docs.rs/const-random" +keywords = [ + "rust", + "constants", + "macro", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/tkaitchuck/constrandom" + +[lib] +proc-macro = true + +[dependencies.getrandom] +version = "0.2.0" + +[dependencies.once_cell] +version = "1.15" +features = [ + "race", + "alloc", +] +default-features = false + +[dependencies.tiny-keccak] +version = "2.0.2" +features = ["shake"] diff --git a/vendor/const-random-macro/LICENSE-APACHE b/vendor/const-random-macro/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/vendor/const-random-macro/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/const-random-macro/LICENSE-MIT b/vendor/const-random-macro/LICENSE-MIT new file mode 100644 index 0000000..5afc2a7 --- /dev/null +++ b/vendor/const-random-macro/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Amanieu d'Antras + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/const-random-macro/src/lib.rs b/vendor/const-random-macro/src/lib.rs new file mode 100644 index 0000000..f656956 --- /dev/null +++ b/vendor/const-random-macro/src/lib.rs @@ -0,0 +1,57 @@ +#[allow(unused_extern_crates)] +extern crate proc_macro; + +use proc_macro::*; +use std::iter::once; +mod span; +use crate::span::{gen_random_bytes, gen_random}; + + +/// Create a TokenStream of an identifier out of a string +fn ident(ident: &str) -> TokenStream { + TokenTree::from(Ident::new(ident, Span::call_site())).into() +} + +#[proc_macro] +pub fn const_random(input: TokenStream) -> TokenStream { + match &input.to_string()[..] 
{ + "u8" => TokenTree::from(Literal::u8_suffixed(gen_random())).into(), + "u16" => TokenTree::from(Literal::u16_suffixed(gen_random())).into(), + "u32" => TokenTree::from(Literal::u32_suffixed(gen_random())).into(), + "u64" => TokenTree::from(Literal::u64_suffixed(gen_random())).into(), + "u128" => TokenTree::from(Literal::u128_suffixed(gen_random())).into(), + "i8" => TokenTree::from(Literal::i8_suffixed(gen_random())).into(), + "i16" => TokenTree::from(Literal::i16_suffixed(gen_random())).into(), + "i32" => TokenTree::from(Literal::i32_suffixed(gen_random())).into(), + "i64" => TokenTree::from(Literal::i64_suffixed(gen_random())).into(), + "i128" => TokenTree::from(Literal::i128_suffixed(gen_random())).into(), + "usize" => { + let value: TokenStream = TokenTree::from(Literal::u128_suffixed(gen_random())).into(); + let type_cast: TokenStream = [value, ident("as"), ident("usize")] + .iter() + .cloned() + .collect(); + TokenTree::from(Group::new(Delimiter::Parenthesis, type_cast)).into() + } + "isize" => { + let value: TokenStream = TokenTree::from(Literal::i128_suffixed(gen_random())).into(); + let type_cast: TokenStream = [value, ident("as"), ident("isize")] + .iter() + .cloned() + .collect(); + TokenTree::from(Group::new(Delimiter::Parenthesis, type_cast)).into() + } + byte_array if byte_array.starts_with("[u8 ; ") && byte_array.ends_with(']')=> { + let len = byte_array[6..byte_array.len()-1].parse().unwrap(); + let mut random_bytes = vec![0; len]; + gen_random_bytes(&mut random_bytes); + let array_parts: TokenStream = random_bytes.into_iter().flat_map(|byte| { + let val = TokenTree::from(Literal::u8_suffixed(byte)); + let comma = TokenTree::from(Punct::new(',', Spacing::Alone)); + once(val).chain(once(comma)) + }).collect(); + TokenTree::from(Group::new(Delimiter::Bracket, array_parts)).into() + } + _ => panic!("Invalid type"), + } +} diff --git a/vendor/const-random-macro/src/span.rs b/vendor/const-random-macro/src/span.rs new file mode 100644 index 0000000..ab72bb0 --- /dev/null +++ b/vendor/const-random-macro/src/span.rs @@ -0,0 +1,104 @@ +use proc_macro::Span; +use std::option_env; + +use once_cell::race::OnceBox; +use tiny_keccak::{Xof, Hasher, Shake}; + + +static SEED: OnceBox> = OnceBox::new(); + +fn get_seed() -> &'static [u8] { + &SEED.get_or_init(|| { + if let Some(value) = option_env!("CONST_RANDOM_SEED") { + Box::new(value.as_bytes().to_vec()) + } else { + let mut value = [0u8; 32]; + getrandom::getrandom(&mut value).unwrap(); + Box::new(value.to_vec()) + } + })[..] 
+} + +pub(crate) fn gen_random() -> T { + Random::random() +} + +pub(crate) fn gen_random_bytes(output: &mut [u8]) { + hash_stuff().squeeze(output) +} + +pub(crate) trait Random { + fn random() -> Self; +} + +fn hash_stuff() -> impl Xof { + let span = Span::call_site(); + let mut hasher = Shake::v256(); + hasher.update(get_seed()); + hasher.update(&format!("{:?}", span).as_bytes()); + hasher +} + +impl Random for u64 { + fn random() -> Self { + let mut output = [0; 8]; + hash_stuff().squeeze(&mut output); + Self::from_ne_bytes(output) + } +} + +impl Random for u128 { + fn random() -> Self { + let mut output = [0; 16]; + hash_stuff().squeeze(&mut output); + Self::from_ne_bytes(output) + } +} + +impl Random for u8 { + fn random() -> Self { + u64::random() as u8 + } +} + +impl Random for u16 { + fn random() -> Self { + u64::random() as u16 + } +} + +impl Random for u32 { + fn random() -> Self { + u64::random() as u32 + } +} + +impl Random for i8 { + fn random() -> Self { + i64::random() as i8 + } +} + +impl Random for i16 { + fn random() -> Self { + i64::random() as i16 + } +} + +impl Random for i32 { + fn random() -> Self { + i64::random() as i32 + } +} + +impl Random for i64 { + fn random() -> Self { + u64::random() as i64 + } +} + +impl Random for i128 { + fn random() -> Self { + u128::random() as i128 + } +} diff --git a/vendor/const-random/.cargo-checksum.json b/vendor/const-random/.cargo-checksum.json new file mode 100644 index 0000000..2204f6d --- /dev/null +++ b/vendor/const-random/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"80431001f5e6c1541a1284baa58a7e8e1bf3bccc5fd611b3fa3e04be0dd1c4b0","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"3b2f80faa84bcddf0530b15df6b4d95dde628975c593ad6a85bba101ff178f2c","src/lib.rs":"d326c18b1f174e456a7fef931c1403faa46cc689055a6dc5dfbfd3f825b1cbf5","tests/tests.rs":"8553f4bae28c61e80289bce82c432e6c992519bad0e965ad4b889c89abf3ce17"},"package":"5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a"} \ No newline at end of file diff --git a/vendor/const-random/Cargo.toml b/vendor/const-random/Cargo.toml new file mode 100644 index 0000000..5bb9da4 --- /dev/null +++ b/vendor/const-random/Cargo.toml @@ -0,0 +1,29 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "const-random" +version = "0.1.17" +authors = ["Tom Kaitchuck "] +description = "Provides compile time random number generation." 
+documentation = "https://docs.rs/const-random" +readme = "README.md" +keywords = [ + "rust", + "constants", + "macro", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/tkaitchuck/constrandom" + +[dependencies.const-random-macro] +version = "0.1.16" diff --git a/vendor/const-random/LICENSE-APACHE b/vendor/const-random/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/vendor/const-random/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+   file or class name and description of purpose be included on the
+   same "printed page" as the copyright notice for easier
+   identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/const-random/LICENSE-MIT b/vendor/const-random/LICENSE-MIT
new file mode 100644
index 0000000..5afc2a7
--- /dev/null
+++ b/vendor/const-random/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016 Amanieu d'Antras
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/const-random/README.md b/vendor/const-random/README.md
new file mode 100644
index 0000000..aa21bec
--- /dev/null
+++ b/vendor/const-random/README.md
@@ -0,0 +1,24 @@
+# Random constants
+This crate provides compile time random number generation.
+This allows you to insert random constants into your code that will be auto-generated at compile time.
+
+A new value will be generated every time the file is rebuilt.
+This obviously makes the resulting binary or lib non-deterministic. (See below)
+
+# Example
+
+```rust
+use const_random::const_random;
+const MY_RANDOM_NUMBER: u32 = const_random!(u32);
+```
+This works exactly as though you had called `OsRng.gen::<u32>()` at compile time.
+So for details of the random number generation, see the `rand` crate's documentation.
+
+The following types are supported: u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize and [u8; N].
+
+# Deterministic builds
+
+Sometimes it is an advantage for build systems to be deterministic. To support this, `const-random` reads the environment
+variable `CONST_RANDOM_SEED`. If this variable is set, it will be used as the seed for the random number generation.
+Setting the same seed on a build of the same code should result in identical output.
+
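The deterministic-builds behavior described in the README above is easiest to see end to end. The following sketch is illustrative only and is not part of the vendored crate: the constant names are invented, and it assumes the build is invoked as `CONST_RANDOM_SEED=fixed-seed cargo build` when reproducible output is wanted.

```rust
// Hypothetical example (not from the vendored crate). With
// CONST_RANDOM_SEED exported in the build environment, e.g.
//   CONST_RANDOM_SEED=fixed-seed cargo build
// rebuilding the same code should embed the same constants; without it,
// fresh values are drawn from the OS on every rebuild.
use const_random::const_random;

const BUILD_TOKEN: u128 = const_random!(u128);
const KEY_BYTES: [u8; 16] = const_random!([u8; 16]);

fn main() {
    println!("token = {:#034x}", BUILD_TOKEN);
    println!("key   = {:?}", KEY_BYTES);
}
```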
diff --git a/vendor/const-random/src/lib.rs b/vendor/const-random/src/lib.rs
new file mode 100644
index 0000000..ae521db
--- /dev/null
+++ b/vendor/const-random/src/lib.rs
@@ -0,0 +1,13 @@
+#![no_std]
+/// # Random constants
+/// Allows you to insert random constants into your code that will be auto-generated at compile time.
+/// A new value will be generated every time the relevant file is re-built.
+/// # Example
+/// ```
+/// use const_random::const_random;
+/// const MY_RANDOM_NUMBER: u32 = const_random!(u32);
+/// const MY_RANDOM_BYTES: [u8; 32] = const_random!([u8; 32]);
+/// ```
+///
+/// The following types are supported: u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize and [u8; N].
+pub use const_random_macro::const_random;
diff --git a/vendor/const-random/tests/tests.rs b/vendor/const-random/tests/tests.rs
new file mode 100644
index 0000000..775a2d2
--- /dev/null
+++ b/vendor/const-random/tests/tests.rs
@@ -0,0 +1,57 @@
+use const_random::const_random;
+
+#[test]
+fn u32() {
+    const VALUE1: u32 = const_random!(u32);
+    const VALUE2: u32 = const_random!(u32);
+    assert_ne!(0, VALUE1, "A random generated constant was zero. (This can randomly occur one time in 2^32) If this reproduces, it is a bug.");
+    assert_ne!(0, VALUE2, "A random generated constant was zero. (This can randomly occur one time in 2^32) If this reproduces, it is a bug.");
+    assert_ne!(VALUE1, VALUE2, "A random generated constant was the same as another. (This can randomly occur one time in 2^32) If this reproduces, it is a bug.");
+}
+
+#[test]
+fn i64() {
+    const VALUE1: i64 = const_random!(i64);
+    const VALUE2: i64 = const_random!(i64);
+    assert_ne!(0, VALUE1, "A random generated constant was zero. (This can randomly occur one time in 2^64) If this reproduces, it is a bug.");
+    assert_ne!(0, VALUE2, "A random generated constant was zero. (This can randomly occur one time in 2^64) If this reproduces, it is a bug.");
+    assert_ne!(VALUE1, VALUE2, "A random generated constant was the same as another. (This can randomly occur one time in 2^64) If this reproduces, it is a bug.");
+}
+
+#[test]
+fn usize() {
+    const VALUE1: usize = const_random!(usize);
+    const VALUE2: usize = const_random!(usize);
+    assert_ne!(0, VALUE1, "A random generated constant was zero. (This can randomly occur one time in 2^64) If this reproduces, it is a bug.");
+    assert_ne!(0, VALUE2, "A random generated constant was zero. (This can randomly occur one time in 2^64) If this reproduces, it is a bug.");
+    assert_ne!(VALUE1, VALUE2, "A random generated constant was the same as another. (This can randomly occur one time in 2^64) If this reproduces, it is a bug.");
+}
+
+#[test]
+fn u128() {
+    const VALUE1: u128 = const_random!(u128);
+    const VALUE2: u128 = const_random!(u128);
+    assert_ne!(0, VALUE1);
+    assert_ne!(0, VALUE2);
+    assert_ne!(VALUE1, VALUE2);
+}
+
+#[test]
+fn suffixed() {
+    fn f<T>(_: T) {
+        // If const_random! emits an unsuffixed integer literal, this assertion
+        // would fail because T would be inferred as the default unsuffixed
+        // integer literal type i32.
+ assert_eq!("u8", std::any::type_name::()); + } + f(const_random!(u8)); +} + +#[test] +fn array() { + const VALUE1: &[u8] = &const_random!([u8; 30]); + const VALUE2: [u8; 30] = const_random!([u8; 30]); + assert_ne!([0u8; 30], VALUE1); + assert_ne!([0u8; 30], VALUE2); + assert_ne!(VALUE1, VALUE2); +} diff --git a/vendor/crunchy/.cargo-checksum.json b/vendor/crunchy/.cargo-checksum.json new file mode 100644 index 0000000..225b779 --- /dev/null +++ b/vendor/crunchy/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"5238a96db3efb623481ac00afca51748372d99e472bc4943f2d1762ad4cc6cf0","README.md":"6d402ec0e7baa639139f550f501b26f0fcd7e33d4b5b1184e027c5836cd6ec06","build.rs":"1fae5664addf11f9e703430663950a6f48aa9d48bfe1b80281e0b25999da3814","src/lib.rs":"e9696c988b07c250de57b4b65254869cfd75e24f5acaa0a7dd54d80facfc3fd9"},"package":"7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"} \ No newline at end of file diff --git a/vendor/crunchy/Cargo.toml b/vendor/crunchy/Cargo.toml new file mode 100644 index 0000000..b05c686 --- /dev/null +++ b/vendor/crunchy/Cargo.toml @@ -0,0 +1,31 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +name = "crunchy" +version = "0.2.2" +authors = ["Vurich "] +build = "build.rs" +description = "Crunchy unroller: deterministically unroll constant loops" +license = "MIT" + +[dependencies] + +[features] +default = ["limit_128"] +limit_1024 = [] +limit_128 = [] +limit_2048 = [] +limit_256 = [] +limit_512 = [] +limit_64 = [] +std = [] diff --git a/vendor/crunchy/README.md b/vendor/crunchy/README.md new file mode 100644 index 0000000..9036450 --- /dev/null +++ b/vendor/crunchy/README.md @@ -0,0 +1,38 @@ +# Crunchy + +The crunchy unroller - deterministically unroll constant loops. For number +"crunching". + +The Rust optimizer will unroll constant loops that don't use the loop variable, +like this: + +```rust +for _ in 0..100 { + println!("Hello!"); +} +``` + +However, using the loop variable will cause it to never unroll the loop. This is +unfortunate because it means that you can't constant-fold the loop variable, and +if you end up stomping on the registers it will have to do a load for each +iteration. This crate ensures that your code is unrolled and const-folded. It +only works on literals, unfortunately, but there's a work-around: + +```rust +debug_assert_eq!(MY_CONSTANT, 100); +unroll! { + for i in 0..100 { + println!("Iteration {}", i); + } +} +``` + +This means that your tests will catch if you redefine the constant. 
+ +To default maximum number of loops to unroll is `64`, but that can be easily increased using the cargo features: + +* `limit_128` +* `limit_256` +* `limit_512` +* `limit_1024` +* `limit_2048` \ No newline at end of file diff --git a/vendor/crunchy/build.rs b/vendor/crunchy/build.rs new file mode 100644 index 0000000..b9c69ae --- /dev/null +++ b/vendor/crunchy/build.rs @@ -0,0 +1,253 @@ +use std::env; +use std::fs::File; +use std::io::Write; +use std::path::Path; + +const LOWER_LIMIT: usize = 16; + +fn main() { + let limit = if cfg!(feature="limit_2048") { + 2048 + } else if cfg!(feature="limit_1024") { + 1024 + } else if cfg!(feature="limit_512") { + 512 + } else if cfg!(feature="limit_256") { + 256 + } else if cfg!(feature="limit_128") { + 128 + } else { + 64 + }; + + let out_dir = env::var("OUT_DIR").unwrap(); + let dest_path = Path::new(&out_dir).join("lib.rs"); + let mut f = File::create(&dest_path).unwrap(); + + let mut output = String::new(); + + output.push_str(r#" +/// Unroll the given for loop +/// +/// Example: +/// +/// ```ignore +/// unroll! { +/// for i in 0..5 { +/// println!("Iteration {}", i); +/// } +/// } +/// ``` +/// +/// will expand into: +/// +/// ```ignore +/// { println!("Iteration {}", 0); } +/// { println!("Iteration {}", 1); } +/// { println!("Iteration {}", 2); } +/// { println!("Iteration {}", 3); } +/// { println!("Iteration {}", 4); } +/// ``` +#[macro_export] +macro_rules! unroll { + (for $v:ident in 0..0 $c:block) => {}; + + (for $v:ident < $max:tt in ($start:tt..$end:tt).step_by($val:expr) {$($c:tt)*}) => { + { + let step = $val; + let start = $start; + let end = start + ($end - start) / step; + unroll! { + for val < $max in start..end { + let $v: usize = ((val - start) * step) + start; + + $($c)* + } + } + } + }; + + (for $v:ident in ($start:tt..$end:tt).step_by($val:expr) {$($c:tt)*}) => { + unroll! 
{ + for $v < $end in ($start..$end).step_by($val) {$($c)*} + } + }; + + (for $v:ident in ($start:tt..$end:tt) {$($c:tt)*}) => { + unroll!{ + for $v in $start..$end {$($c)*} + } + }; + + (for $v:ident in $start:tt..$end:tt {$($c:tt)*}) => { + #[allow(non_upper_case_globals)] + #[allow(unused_comparisons)] + { + unroll!(@$v, 0, $end, { + if $v >= $start {$($c)*} + } + ); + } + }; + + (for $v:ident < $max:tt in $start:tt..$end:tt $c:block) => { + #[allow(non_upper_case_globals)] + { + let range = $start..$end; + assert!( + $max >= range.end, + "`{}` out of range `{:?}`", + stringify!($max), + range, + ); + unroll!( + @$v, + 0, + $max, + { + if $v >= range.start && $v < range.end { + $c + } + } + ); + } + }; + + (for $v:ident in 0..$end:tt {$($statement:tt)*}) => { + #[allow(non_upper_case_globals)] + { unroll!(@$v, 0, $end, {$($statement)*}); } + }; + +"#); + + for i in 0..limit + 1 { + output.push_str(format!(" (@$v:ident, $a:expr, {}, $c:block) => {{\n", i).as_str()); + + if i <= LOWER_LIMIT { + output.push_str(format!(" {{ const $v: usize = $a; $c }}\n").as_str()); + + for a in 1..i { + output.push_str(format!(" {{ const $v: usize = $a + {}; $c }}\n", a).as_str()); + } + } else { + let half = i / 2; + + if i % 2 == 0 { + output.push_str(format!(" unroll!(@$v, $a, {0}, $c);\n", half).as_str()); + output.push_str(format!(" unroll!(@$v, $a + {0}, {0}, $c);\n", half).as_str()); + } else { + if half > 1 { + output.push_str(format!(" unroll!(@$v, $a, {}, $c);\n", i - 1).as_str()) + } + + output.push_str(format!(" {{ const $v: usize = $a + {}; $c }}\n", i - 1).as_str()); + } + } + + output.push_str(" };\n\n"); + } + + output.push_str("}\n\n"); + + output.push_str(format!(r#" +#[cfg(all(test, feature = "std"))] +mod tests {{ + #[test] + fn invalid_range() {{ + let mut a: Vec = vec![]; + unroll! {{ + for i in (5..4) {{ + a.push(i); + }} + }} + assert_eq!(a, vec![]); + }} + + #[test] + fn start_at_one_with_step() {{ + let mut a: Vec = vec![]; + unroll! {{ + for i in (2..4).step_by(1) {{ + a.push(i); + }} + }} + assert_eq!(a, vec![2, 3]); + }} + + #[test] + fn start_at_one() {{ + let mut a: Vec = vec![]; + unroll! {{ + for i in 1..4 {{ + a.push(i); + }} + }} + assert_eq!(a, vec![1, 2, 3]); + }} + + #[test] + fn test_all() {{ + {{ + let a: Vec = vec![]; + unroll! {{ + for i in 0..0 {{ + a.push(i); + }} + }} + assert_eq!(a, (0..0).collect::>()); + }} + {{ + let mut a: Vec = vec![]; + unroll! {{ + for i in 0..1 {{ + a.push(i); + }} + }} + assert_eq!(a, (0..1).collect::>()); + }} + {{ + let mut a: Vec = vec![]; + unroll! {{ + for i in 0..{0} {{ + a.push(i); + }} + }} + assert_eq!(a, (0..{0}).collect::>()); + }} + {{ + let mut a: Vec = vec![]; + let start = {0} / 4; + let end = start * 3; + unroll! {{ + for i < {0} in start..end {{ + a.push(i); + }} + }} + assert_eq!(a, (start..end).collect::>()); + }} + {{ + let mut a: Vec = vec![]; + unroll! {{ + for i in (0..{0}).step_by(2) {{ + a.push(i); + }} + }} + assert_eq!(a, (0..{0} / 2).map(|x| x * 2).collect::>()); + }} + {{ + let mut a: Vec = vec![]; + let start = {0} / 4; + let end = start * 3; + unroll! {{ + for i < {0} in (start..end).step_by(2) {{ + a.push(i); + }} + }} + assert_eq!(a, (start..end).filter(|x| x % 2 == 0).collect::>()); + }} + }} +}} +"#, limit).as_str()); + + f.write_all(output.as_bytes()).unwrap(); +} diff --git a/vendor/crunchy/src/lib.rs b/vendor/crunchy/src/lib.rs new file mode 100644 index 0000000..6de39e2 --- /dev/null +++ b/vendor/crunchy/src/lib.rs @@ -0,0 +1,36 @@ +//! 
The crunchy unroller - deterministically unroll constant loops. For number "crunching". +//! +//! The Rust optimizer will unroll constant loops that don't use the loop variable, like this: +//! +//! ```ignore +//! for _ in 0..100 { +//! println!("Hello!"); +//! } +//! ``` +//! +//! However, using the loop variable will cause it to never unroll the loop. This is unfortunate because it means that you can't +//! constant-fold the loop variable, and if you end up stomping on the registers it will have to do a load for each iteration. +//! This crate ensures that your code is unrolled and const-folded. It only works on literals, +//! unfortunately, but there's a work-around: +//! +//! ```ignore +//! debug_assert_eq!(MY_CONSTANT, 100); +//! unroll! { +//! for i in 0..100 { +//! println!("Iteration {}", i); +//! } +//! } +//! ``` +//! This means that your tests will catch if you redefine the constant. +//! +//! To default maximum number of loops to unroll is `64`, but that can be easily increased using the cargo features: +//! +//! * `limit_128` +//! * `limit_256` +//! * `limit_512` +//! * `limit_1024` +//! * `limit_2048` + +#![cfg_attr(not(feature = "std"), no_std)] + +include!(concat!(env!("OUT_DIR"), "/lib.rs")); diff --git a/vendor/dlv-list/.cargo-checksum.json b/vendor/dlv-list/.cargo-checksum.json new file mode 100644 index 0000000..7f4d439 --- /dev/null +++ b/vendor/dlv-list/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"e1b15544e251c43a45b8c3b8b03209011fe0fc1317ec7a9e27ed369439536106","CONTRIBUTING.md":"67f4e7464aca9efc85404a2b24c1045574b3ee804a2e9875f2fd1ba114d7505f","Cargo.toml":"e41ee2c42c2cf1ac1d3116d9651313ee037faf8f14f11baa83fe62e0b9768cf1","LICENSE-APACHE":"95bd3988beee069fa2848f648dab43cc6e0b2add2ad6bcb17360caf749802bcc","LICENSE-MIT":"77adcddfe9e50acd2df63ddbb8e566bb8d34b4d02a9d92e7a5c1b9c2225eda9f","README.md":"a75755d99736e2ac12945efcfabe83239dcef190b7811bf376b83d0121e6ceaa","codecov.yml":"550982ef37ab56e6c3e06351f359f2407855a54c27f7e9c7871b855aa34c9109","rustfmt.toml":"9d197f8ce3b24c6aa98627d614420d5291fde7c5442cf77a7f8718dc9375f361","src/lib.rs":"612211fad07183f979a25dc3883d70fa739b3783230e281e8553bf10747973ad","src/serde.rs":"e65c9bf53ed43bd92cec925cf2a4e87223394a5c387bacca32a253692136b8bd"},"package":"442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f"} \ No newline at end of file diff --git a/vendor/dlv-list/CHANGELOG.md b/vendor/dlv-list/CHANGELOG.md new file mode 100644 index 0000000..2b509b7 --- /dev/null +++ b/vendor/dlv-list/CHANGELOG.md @@ -0,0 +1,119 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [0.5.2] - 2023-10-24 + +### Changed + + - Updated `coverage-helper` dependency to `0.2.0`. + +## [0.5.1] - 2023-08-31 + +### Added + + - Added `front_index` and `back_index` functions - @ColinFinck + +## [0.5.0] - 2023-01-21 + +### Added + + - Added support for `no_std` - @notgull + - Added `get_unchecked` and `get_unchecked_mut` functions - @yshui + - Added `move_before` and `move_after` functions - @yshui + +### Changed + + - Added specific implementation for in-place clone for better performance - @yshui + - Reduced size of `Entry` - @yshui + +## [0.4.0] - 2022-08-25 + +### Added + + - Added symmetric versions of existing `PartialEq` implementations. + - Added `PartialEq` for fixed size arrays. 
+ - Added optional `serde` support. + +### Changed + + - Added `#[must_use]` to all side-effect free functions. + +## [0.3.0] + +### Removed + + - Drop dependency on `rand`. + +## [0.2.4] + +### Removed + + - Yanked due to breaking change. + +## [0.2.3] + +### Changed + + - Update `rand` from `0.7.3` to `0.8.3`. + +## [0.2.2] + +### Changed + + - Update `rand` from `0.5.5` to `0.7.3`. + +## [0.2.1] + +### Changed + + - Reduce memory size of `VecList` from 96 bytes to 64. Tradeoff is max capacity is now reduced by 1 and a very slight + performance decrease. + +## [0.2.0] + +### Changed + + - Change `VecList::retain` to give mutability to entries. + +## [0.1.5] + +### Added + + - Add unsafe removal function `VecList::remove_sync`. See its documentation for details. + +## [0.1.4] + +### Changed + + - Remove unnecessary `Debug` bounds. + +## [0.1.3] + +### Fixed + + - Fix possible overflow when incrementing generation. + - Fix underflow when calling `pack_to_fit` on an empty `VecList`. + +## [0.1.2] + +### Added + + - Make iterator `iter` functions public. + +## [0.1.1] + +### Changed + + - Iterator optimizations. + +## [0.1.0] + +### Added + + - Initial release. diff --git a/vendor/dlv-list/CONTRIBUTING.md b/vendor/dlv-list/CONTRIBUTING.md new file mode 100644 index 0000000..614cf5b --- /dev/null +++ b/vendor/dlv-list/CONTRIBUTING.md @@ -0,0 +1,74 @@ +# Contribution guidelines + +First off, thank you for considering contributing to dlv-list. + +If your contribution is not straightforward, please first discuss the change you wish to make by creating a new issue +before making the change. + +## Reporting issues + +Before reporting an issue on the [issue tracker](https://github.com/sgodwincs/dlv-list/issues), please +check that it has not already been reported by searching for some related keywords. + +## Pull requests + +Try to do one pull request per change. + +### Updating the changelog + +Update the changes you have made in +[CHANGELOG](https://github.com/sgodwincs/dlv-list/blob/main/CHANGELOG.md) +file under the **Unreleased** section. + +Add the changes of your pull request to one of the following subsections, depending on the types of changes defined by +[Keep a changelog](https://keepachangelog.com/en/1.0.0/): + +- `Added` for new features. +- `Changed` for changes in existing functionality. +- `Deprecated` for soon-to-be removed features. +- `Removed` for now removed features. +- `Fixed` for any bug fixes. +- `Security` in case of vulnerabilities. + +If the required subsection does not exist yet under **Unreleased**, create it! + +## Developing + +### Set up + +This is no different than other Rust projects. + +```shell +git clone https://github.com/sgodwincs/dlv-list +cd dlv-list +cargo test +``` + +### Useful Commands +- Run Clippy: + + ```shell + cargo clippy --all-targets --all-features --workspace + ``` + +- Run all tests: + + ```shell + cargo test --all-features --workspace + ``` + +- Check to see if there are code formatting issues + + ```shell + cargo fmt --all -- --check + ``` + +- Format the code in the project + + ```shell + cargo fmt --all + ``` + +## Code of Conduct + +This project adheres to the Rust Code of Conduct, which can be found [here](https://www.rust-lang.org/conduct.html). 
diff --git a/vendor/dlv-list/Cargo.toml b/vendor/dlv-list/Cargo.toml new file mode 100644 index 0000000..4df508a --- /dev/null +++ b/vendor/dlv-list/Cargo.toml @@ -0,0 +1,44 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +name = "dlv-list" +version = "0.5.2" +authors = ["Scott Godwin "] +description = "Semi-doubly linked list implemented using a vector" +readme = "README.md" +keywords = [ + "vector", + "linked", + "list", +] +categories = ["data-structures"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/sgodwincs/dlv-list-rs" + +[dependencies.const-random] +version = "0.1.15" + +[dependencies.serde] +version = "1" +optional = true +default-features = false + +[dev-dependencies.coverage-helper] +version = "0.2.0" + +[dev-dependencies.serde_test] +version = "1.0.144" + +[features] +default = ["std"] +std = [] diff --git a/vendor/dlv-list/LICENSE-APACHE b/vendor/dlv-list/LICENSE-APACHE new file mode 100644 index 0000000..a7e77cb --- /dev/null +++ b/vendor/dlv-list/LICENSE-APACHE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/vendor/dlv-list/LICENSE-MIT b/vendor/dlv-list/LICENSE-MIT new file mode 100644 index 0000000..1a0f083 --- /dev/null +++ b/vendor/dlv-list/LICENSE-MIT @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 Scott Godwin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/dlv-list/README.md b/vendor/dlv-list/README.md new file mode 100644 index 0000000..2ddf783 --- /dev/null +++ b/vendor/dlv-list/README.md @@ -0,0 +1,31 @@ +# dlv-list-rs + +[![Crates.io](https://img.shields.io/crates/v/dlv-list.svg)](https://crates.io/crates/dlv-list) +[![Docs.rs](https://docs.rs/dlv-list/badge.svg)](https://docs.rs/dlv_list) +[![CI](https://github.com/sgodwincs/dlv-list-rs/workflows/CI/badge.svg)](https://github.com/sgodwincs/dlv-list-rs/actions) + +Semi-doubly linked list implemented using a vector. + +# Features + + - `std` (default) enables usage of `libstd`, disabling this feature will make the crate `no_std` compatible. + - `serde` for (de)serialization. 
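+
+## Example
+
+A minimal usage sketch (mirroring the doc examples in `src/lib.rs`; indices returned by the push
+operations stay valid until their entry is removed):
+
+```rust
+use dlv_list::VecList;
+
+// Push returns a stable index that can be used for O(1) access later.
+let mut list = VecList::new();
+let index = list.push_back(0);
+list.push_back(5);
+
+assert_eq!(list.get(index), Some(&0));
+assert_eq!(list.iter().copied().collect::<Vec<_>>(), vec![0, 5]);
+```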
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0
+   ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license
+   ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as
+defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
+
+See [CONTRIBUTING.md](CONTRIBUTING.md).
+
diff --git a/vendor/dlv-list/codecov.yml b/vendor/dlv-list/codecov.yml
new file mode 100644
index 0000000..d898005
--- /dev/null
+++ b/vendor/dlv-list/codecov.yml
@@ -0,0 +1,5 @@
+coverage:
+  status:
+    project:
+      default:
+        target: 95%
diff --git a/vendor/dlv-list/rustfmt.toml b/vendor/dlv-list/rustfmt.toml
new file mode 100644
index 0000000..723f864
--- /dev/null
+++ b/vendor/dlv-list/rustfmt.toml
@@ -0,0 +1,2 @@
+imports_granularity = "Crate"
+tab_spaces = 2
diff --git a/vendor/dlv-list/src/lib.rs b/vendor/dlv-list/src/lib.rs
new file mode 100644
index 0000000..9754363
--- /dev/null
+++ b/vendor/dlv-list/src/lib.rs
@@ -0,0 +1,3546 @@
+//! Crate that implements a semi-doubly linked list via a vector.
+//!
+//! See [`VecList`] for more information.
+//!
+//! # Features
+//!
+//! By default, this crate uses the Rust standard library. To disable this, disable the default
+//! `std` feature. Without this feature, certain methods will not be available.
+
+#![allow(unsafe_code)]
+#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
+#![cfg_attr(not(any(feature = "std", test)), no_std)]
+
+extern crate alloc;
+
+use alloc::{collections::LinkedList, vec::Vec};
+use core::{
+  cmp::Ordering,
+  fmt::{self, Debug, Formatter},
+  hash::{Hash, Hasher},
+  hint::unreachable_unchecked,
+  iter::{FromIterator, FusedIterator},
+  marker::PhantomData,
+  mem,
+  num::NonZeroUsize,
+  ops,
+};
+
+#[cfg(feature = "std")]
+use std::collections::HashMap;
+
+#[cfg(feature = "serde")]
+mod serde;
+
+/// Number type that's capable of representing [0, usize::MAX - 1].
+#[repr(transparent)]
+#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
+struct NonMaxUsize(NonZeroUsize);
+
+impl Debug for NonMaxUsize {
+  fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+    write!(f, "{}", self.get())
+  }
+}
+
+impl NonMaxUsize {
+  /// Convert an index to a usize.
+  #[cfg_attr(mutants, mutants::skip)]
+  #[inline]
+  const fn get(&self) -> usize {
+    self.0.get() - 1
+  }
+
+  /// Create a new index from a usize; if `index` is `usize::MAX`, then `None` is returned.
+  #[inline]
+  const fn new(index: usize) -> Option<Self> {
+    match NonZeroUsize::new(index.wrapping_add(1)) {
+      Some(index) => Some(Self(index)),
+      None => None,
+    }
+  }
+
+  /// Create a new index from a usize, without checking if `index` is `usize::MAX`.
+  ///
+  /// # Safety
+  ///
+  /// `index` must not be `usize::MAX`.
+  #[cfg(feature = "std")]
+  #[inline]
+  const unsafe fn new_unchecked(index: usize) -> Self {
+    Self(unsafe { NonZeroUsize::new_unchecked(index + 1) })
+  }
+
+  /// Add an unsigned integer to an index. Checks for bound violations and returns `None` if the result would be larger than or equal to `usize::MAX`.
+  #[cfg(feature = "std")]
+  #[inline]
+  fn checked_add(&self, rhs: usize) -> Option<Self> {
+    self.0.checked_add(rhs).map(Self)
+  }
+
+  /// Subtract an unsigned integer from an index. Checks for bound violations and returns `None` if the result would be less than 0.
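+  ///
+  /// An illustrative sketch of the `index + 1` encoding (hypothetical values, not a doctest):
+  ///
+  /// ```ignore
+  /// let two = NonMaxUsize::new(2).unwrap();              // stored internally as NonZeroUsize(3)
+  /// assert_eq!(two.checked_sub(1), NonMaxUsize::new(1)); // subtraction acts on the decoded 2
+  /// assert_eq!(two.checked_sub(3), None);                // 2 - 3 would underflow
+  /// ```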
+  #[cfg(feature = "std")]
+  #[inline]
+  fn checked_sub(&self, rhs: usize) -> Option<Self> {
+    // Safety: `self` is less than `usize::MAX`, so `self - rhs` can only be less than `usize::MAX`.
+    self
+      .get()
+      .checked_sub(rhs)
+      .map(|i| unsafe { Self::new_unchecked(i) })
+  }
+
+  #[cfg(feature = "std")]
+  #[inline]
+  const fn zero() -> Self {
+    Self(unsafe { NonZeroUsize::new_unchecked(1) })
+  }
+}
+
+impl PartialEq<usize> for NonMaxUsize {
+  fn eq(&self, other: &usize) -> bool {
+    self.get() == *other
+  }
+}
+
+impl PartialOrd<usize> for NonMaxUsize {
+  fn partial_cmp(&self, other: &usize) -> Option<Ordering> {
+    self.get().partial_cmp(other)
+  }
+}
+
+/// A semi-doubly linked list implemented with a vector.
+///
+/// This provides many of the benefits of an actual linked list with a few tradeoffs. First, due to the use of an
+/// underlying vector, an individual insert operation may be O(n) due to allocating more space for the vector. However,
+/// it is amortized O(1), and it avoids the frequent allocations that traditional linked lists suffer from.
+///
+/// Another tradeoff is that extending a traditional linked list with another list is O(1), but a vector-based
+/// implementation is O(n). Splicing has a similar disadvantage.
+///
+/// Lastly, the vector-based implementation is likely to have better cache locality in general.
+pub struct VecList<T> {
+  /// The backing storage for the list. This includes both used and unused indices.
+  entries: Vec<Entry<T>>,
+
+  /// The current generation of the list. This is used to avoid the ABA problem.
+  generation: u64,
+
+  /// The index of the head of the list.
+  head: Option<NonMaxUsize>,
+
+  /// The length of the list, since we cannot rely on the length of [`VecList::entries`] because it includes unused
+  /// indices.
+  length: usize,
+
+  /// The index of the tail of the list.
+  tail: Option<NonMaxUsize>,
+
+  /// The index of the head of the vacant indices.
+  vacant_head: Option<NonMaxUsize>,
+}
+
+impl<T: Clone> Clone for VecList<T> {
+  fn clone(&self) -> Self {
+    Self {
+      entries: self.entries.clone(),
+      generation: self.generation,
+      head: self.head,
+      length: self.length,
+      tail: self.tail,
+      vacant_head: self.vacant_head,
+    }
+  }
+
+  fn clone_from(&mut self, source: &Self) {
+    self.entries.clone_from(&source.entries);
+    self.generation = source.generation;
+    self.head = source.head;
+    self.length = source.length;
+    self.tail = source.tail;
+    self.vacant_head = source.vacant_head;
+  }
+}
+
+impl<T> VecList<T> {
+  /// Returns an immutable reference to the value at the back of the list, if it exists.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use dlv_list::VecList;
+  ///
+  /// let mut list = VecList::new();
+  /// assert_eq!(list.back(), None);
+  ///
+  /// list.push_back(0);
+  /// list.push_back(5);
+  /// assert_eq!(list.back(), Some(&5));
+  /// ```
+  #[must_use]
+  pub fn back(&self) -> Option<&T> {
+    let index = self.tail?.get();
+
+    match &self.entries[index] {
+      Entry::Occupied(entry) => Some(&entry.value),
+      _ => None,
+    }
+  }
+
+  /// Returns the index of the value at the back of the list, if it exists.
+ /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.back_index(), None); + /// + /// list.push_back(0); + /// let index = list.push_back(5); + /// assert_eq!(list.back_index(), Some(index)); + /// ``` + #[must_use] + pub fn back_index(&self) -> Option> { + let index = self.tail?; + let entry = self.entries[index.get()].occupied_ref(); + let index = Index::new(index, entry.generation); + Some(index) + } + + /// Returns a mutable reference to the value at the back of the list, if it exists. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.back_mut(), None); + /// + /// list.push_back(0); + /// list.push_back(5); + /// + /// let mut back = list.back_mut().unwrap(); + /// assert_eq!(back, &mut 5); + /// *back *= 2; + /// + /// assert_eq!(list.back(), Some(&10)); + /// ``` + #[must_use] + pub fn back_mut(&mut self) -> Option<&mut T> { + let index = self.tail?.get(); + + match &mut self.entries[index] { + Entry::Occupied(entry) => Some(&mut entry.value), + _ => None, + } + } + + /// Returns the capacity of the list. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let list: VecList = VecList::new(); + /// assert_eq!(list.capacity(), 0); + /// + /// let list: VecList = VecList::with_capacity(10); + /// assert_eq!(list.capacity(), 10); + /// ``` + #[must_use] + pub fn capacity(&self) -> usize { + self.entries.capacity() + } + + /// Removes all values from the list and invalidates all existing indices. + /// + /// Complexity: O(n) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// + /// list.push_back(5); + /// assert!(!list.is_empty()); + /// + /// list.clear(); + /// assert!(list.is_empty()); + /// ``` + pub fn clear(&mut self) { + self.entries.clear(); + self.generation = self.generation.wrapping_add(1); + self.head = None; + self.length = 0; + self.tail = None; + self.vacant_head = None; + } + + /// Returns whether or not the list contains the given value. + /// + /// Complexity: O(n) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert!(!list.contains(&0)); + /// + /// list.push_back(0); + /// assert!(list.contains(&0)); + /// ``` + #[must_use] + pub fn contains(&self, value: &T) -> bool + where + T: PartialEq, + { + self.iter().any(|entry| entry == value) + } + + /// Creates a draining iterator that removes all values from the list and yields them in order. + /// + /// All values are removed even if the iterator is only partially consumed or not consumed at all. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_back(0); + /// list.push_back(5); + /// + /// { + /// let mut iter = list.drain(); + /// assert_eq!(iter.next(), Some(0)); + /// assert_eq!(iter.next(), Some(5)); + /// assert_eq!(iter.next(), None); + /// } + /// + /// println!("{}", list.len()); + /// assert!(list.is_empty()); + /// ``` + pub fn drain(&mut self) -> Drain<'_, T> { + Drain { + head: self.head, + remaining: self.length, + tail: self.tail, + list: self, + } + } + + /// Returns an immutable reference to the value at the front of the list, if it exists. 
+ /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.front(), None); + /// + /// list.push_front(0); + /// list.push_front(5); + /// assert_eq!(list.front(), Some(&5)); + /// ``` + #[must_use] + pub fn front(&self) -> Option<&T> { + let index = self.head?.get(); + + match &self.entries[index] { + Entry::Occupied(entry) => Some(&entry.value), + _ => None, + } + } + + /// Returns the index of the value at the front of the list, if it exists. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.front_index(), None); + /// + /// list.push_front(0); + /// let index = list.push_front(5); + /// assert_eq!(list.front_index(), Some(index)); + /// ``` + #[must_use] + pub fn front_index(&self) -> Option> { + let index = self.head?; + let entry = self.entries[index.get()].occupied_ref(); + let index = Index::new(index, entry.generation); + Some(index) + } + + /// Returns a mutable reference to the value at the front of the list, if it exists. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.front_mut(), None); + /// + /// list.push_front(0); + /// list.push_front(5); + /// + /// let mut front = list.front_mut().unwrap(); + /// assert_eq!(front, &mut 5); + /// *front *= 2; + /// + /// assert_eq!(list.front(), Some(&10)); + /// ``` + #[must_use] + pub fn front_mut(&mut self) -> Option<&mut T> { + let index = self.head?.get(); + + match &mut self.entries[index] { + Entry::Occupied(entry) => Some(&mut entry.value), + _ => None, + } + } + + /// Returns an immutable reference to the value at the given index. + /// + /// If the index refers to an index not in the list anymore or if the index has been invalidated, then [`None`] will + /// be returned. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index = list.push_front(0); + /// assert_eq!(list.get(index), Some(&0)); + /// + /// let index = list.push_front(5); + /// assert_eq!(list.get(index), Some(&5)); + /// ``` + #[must_use] + pub fn get(&self, index: Index) -> Option<&T> { + match self.entries.get(index.index())? { + Entry::Occupied(entry) if entry.generation == index.generation => Some(&entry.value), + _ => None, + } + } + + /// Returns an immutable reference to the value at the given index. + /// + /// Complexity: O(1) + /// + /// # Safety + /// + /// Caller needs to guarantee that the index is in bound, and has never been removed from the + /// list. This function does not perform generation checks. So if an element is removed then a + /// new element is added at the same index, then the returned reference will be to the new + /// element. + #[must_use] + pub unsafe fn get_unchecked(&self, index: Index) -> &T { + match unsafe { self.entries.get_unchecked(index.index()) } { + Entry::Occupied(entry) => &entry.value, + _ => unsafe { unreachable_unchecked() }, + } + } + + /// Returns a mutable reference to the value at the given index. + /// + /// If the index refers to an index not in the list anymore or if the index has been invalidated, then [`None`] will + /// be returned. 
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use dlv_list::VecList;
+  ///
+  /// let mut list = VecList::new();
+  /// let index = list.push_front(0);
+  /// let value = list.get_mut(index).unwrap();
+  /// *value = 100;
+  /// assert_eq!(list.get(index), Some(&100));
+  /// ```
+  #[must_use]
+  pub fn get_mut(&mut self, index: Index<T>) -> Option<&mut T> {
+    match self.entries.get_mut(index.index())? {
+      Entry::Occupied(entry) if entry.generation == index.generation => Some(&mut entry.value),
+      _ => None,
+    }
+  }
+
+  /// Returns a mutable reference to the value at the given index.
+  ///
+  /// # Safety
+  ///
+  /// The caller needs to guarantee that the index is in bounds and has never been removed from the list.
+  /// See also: [`VecList::get_unchecked`].
+  ///
+  /// Complexity: O(1)
+  #[must_use]
+  pub unsafe fn get_unchecked_mut(&mut self, index: Index<T>) -> &mut T {
+    match unsafe { self.entries.get_unchecked_mut(index.index()) } {
+      Entry::Occupied(entry) => &mut entry.value,
+      _ => unsafe { unreachable_unchecked() },
+    }
+  }
+
+  /// Returns the index of the value next to the value at the given index.
+  ///
+  /// If the index refers to an index not in the list anymore or if the index has been invalidated, then [`None`] will
+  /// be returned.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use dlv_list::VecList;
+  ///
+  /// let mut list = VecList::new();
+  ///
+  /// let index_1 = list.push_back(0);
+  /// assert_eq!(list.get_next_index(index_1), None);
+  ///
+  /// let index_2 = list.push_back(5);
+  /// assert_eq!(list.get_next_index(index_1), Some(index_2));
+  /// ```
+  #[must_use]
+  pub fn get_next_index(&self, index: Index<T>) -> Option<Index<T>> {
+    match self.entries.get(index.index())? {
+      Entry::Occupied(entry) if entry.generation == index.generation => {
+        let next_index = entry.next?;
+        let next_entry = self.entries[next_index.get()].occupied_ref();
+        Some(Index::new(next_index, next_entry.generation))
+      }
+      _ => None,
+    }
+  }
+
+  /// Returns the index of the value previous to the value at the given index.
+  ///
+  /// If the index refers to an index not in the list anymore or if the index has been invalidated, then [`None`] will
+  /// be returned.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use dlv_list::VecList;
+  ///
+  /// let mut list = VecList::new();
+  ///
+  /// let index_1 = list.push_front(0);
+  /// assert_eq!(list.get_previous_index(index_1), None);
+  ///
+  /// let index_2 = list.push_front(5);
+  /// assert_eq!(list.get_previous_index(index_1), Some(index_2));
+  /// ```
+  #[must_use]
+  pub fn get_previous_index(&self, index: Index<T>) -> Option<Index<T>> {
+    match self.entries.get(index.index())? {
+      Entry::Occupied(entry) if entry.generation == index.generation => {
+        let previous_index = entry.previous?;
+        let previous_entry = self.entries[previous_index.get()].occupied_ref();
+        Some(Index::new(previous_index, previous_entry.generation))
+      }
+      _ => None,
+    }
+  }
+
+  /// Connect the node at `index` to the node at `next`. If `index` is `None`, then the head will be
+  /// set to `next`; if `next` is `None`, then the tail will be set to `index`.
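+  ///
+  /// An illustrative sketch (hypothetical indices, not a doctest): given links `a <-> b <-> c`,
+  /// bridging over `b` and then promoting a new head would look like
+  ///
+  /// ```ignore
+  /// self.update_link(Some(a), Some(c)); // `a` and `c` now point at each other
+  /// self.update_link(None, Some(c));    // `c` becomes the new head
+  /// ```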
+ #[inline] + fn update_link(&mut self, index: Option, next: Option) { + if let Some(index) = index { + let entry = self.entries[index.get()].occupied_mut(); + entry.next = next; + } else { + self.head = next + } + if let Some(next) = next { + let entry = self.entries[next.get()].occupied_mut(); + entry.previous = index; + } else { + self.tail = index; + } + } + + /// Move the node at `index` to after the node at `target`. + /// + /// # Panics + /// + /// Panics if either `index` or `target` is invalidated. Also panics if `index` is the same as `target`. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index_1 = list.push_back(0); + /// let index_2 = list.push_back(1); + /// let index_3 = list.push_back(2); + /// let index_4 = list.push_back(3); + /// + /// list.move_after(index_1, index_3); + /// assert_eq!(list.iter().copied().collect::>(), vec![1, 2, 0, 3]); + /// assert_eq!(list.iter().rev().copied().collect::>(), vec![3, 0, 2, 1]); + /// ``` + pub fn move_after(&mut self, index: Index, target: Index) { + let (previous_index, next_index) = match &self.entries[index.index()] { + Entry::Occupied(entry) if entry.generation == index.generation => { + (entry.previous, entry.next) + } + _ => panic!("expected occupied entry with correct generation at `index`"), + }; + let target_next_index = match &self.entries[target.index()] { + Entry::Occupied(entry) if entry.generation == target.generation => entry.next, + _ => panic!("expected occupied entry with correct generation at `target`"), + }; + if target.index == index.index { + panic!("cannot move after `index` itself"); + } + if previous_index == Some(target.index) { + // Already in the right place + return; + } + self.update_link(previous_index, next_index); + self.update_link(Some(target.index), Some(index.index)); + self.update_link(Some(index.index), target_next_index); + } + + /// Move the node at `index` to before the node at `target`. + /// + /// # Panics + /// + /// Panics if either `index` or `target` is invalidated. Also panics if `index` is the same as `target`. 
+ /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index_1 = list.push_back(0); + /// let index_2 = list.push_back(1); + /// let index_3 = list.push_back(2); + /// let index_4 = list.push_back(3); + /// + /// list.move_before(index_1, index_3); + /// assert_eq!(list.iter().copied().collect::>(), vec![1, 0, 2, 3]); + /// assert_eq!(list.iter().rev().copied().collect::>(), vec![3, 2, 0, 1]); + /// ``` + pub fn move_before(&mut self, index: Index, target: Index) { + let (previous_index, next_index) = match &self.entries[index.index()] { + Entry::Occupied(entry) if entry.generation == index.generation => { + (entry.previous, entry.next) + } + _ => panic!("expected occupied entry with correct generation at `index`"), + }; + let target_previous_index = match &self.entries[target.index()] { + Entry::Occupied(entry) if entry.generation == target.generation => entry.previous, + _ => panic!("expected occupied entry with correct generation at `target`"), + }; + if target.index == index.index { + panic!("cannot move before `index` itself"); + } + if next_index == Some(target.index) { + // Already in the right place + return; + } + self.update_link(previous_index, next_index); + self.update_link(Some(index.index), Some(target.index)); + self.update_link(target_previous_index, Some(index.index)); + } + + /// Creates an indices iterator which will yield all indices of the list in order. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_front(0); + /// list.push_front(5); + /// + /// let mut indices = list.indices(); + /// let index = indices.next().unwrap(); + /// assert_eq!(list.get(index), Some(&5)); + /// + /// let index = indices.next().unwrap(); + /// assert_eq!(list.get(index), Some(&0)); + /// + /// assert_eq!(indices.next(), None); + /// ``` + #[must_use] + pub fn indices(&self) -> Indices<'_, T> { + Indices { + entries: &self.entries, + head: self.head, + remaining: self.length, + tail: self.tail, + } + } + + /// Inserts the given value after the value at the given index. + /// + /// The index of the newly inserted value will be returned. + /// + /// Complexity: amortized O(1) + /// + /// # Panics + /// + /// Panics if the index refers to an index not in the list anymore or if the index has been invalidated. This is + /// enforced because this function will consume the value to be inserted, and if it cannot be inserted (due to the + /// index not being valid), then it will be lost. + /// + /// Also panics if the new capacity overflows `usize`. 
+ /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_front(0); + /// let index_1 = list.push_front(5); + /// list.push_front(10); + /// + /// let index_2 = list.insert_after(index_1, 1000); + /// assert_eq!(list.get_next_index(index_1), Some(index_2)); + /// ``` + pub fn insert_after(&mut self, index: Index, value: T) -> Index { + let next_index = match &mut self.entries[index.index()] { + Entry::Occupied(entry) if entry.generation == index.generation => entry.next, + _ => panic!("expected occupied entry with correct generation"), + }; + let new_index = self.insert_new(value, Some(index.index), next_index); + let entry = self.entries[index.index()].occupied_mut(); + entry.next = Some(new_index); + + if Some(index.index) == self.tail { + self.tail = Some(new_index); + } + + if let Some(next_index) = next_index { + self.entries[next_index.get()].occupied_mut().previous = Some(new_index); + } + + Index::new(new_index, self.generation) + } + + /// Inserts the given value before the value at the given index. + /// + /// The index of the newly inserted value will be returned. + /// + /// Complexity: amortized O(1) + /// + /// # Panics + /// + /// Panics if the index refers to an index not in the list anymore or if the index has been invalidated. This is + /// enforced because this function will consume the value to be inserted, and if it cannot be inserted (due to the + /// index not being valid), then it will be lost. + /// + /// Also panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_front(0); + /// let index_1 = list.push_front(5); + /// list.push_front(10); + /// + /// let index_2 = list.insert_before(index_1, 1000); + /// assert_eq!(list.get_previous_index(index_1), Some(index_2)); + /// ``` + pub fn insert_before(&mut self, index: Index, value: T) -> Index { + let previous_index = match &mut self.entries[index.index()] { + Entry::Occupied(entry) if entry.generation == index.generation => entry.previous, + _ => panic!("expected occupied entry with correct generation"), + }; + let new_index = self.insert_new(value, previous_index, Some(index.index)); + let entry = self.entries[index.index()].occupied_mut(); + entry.previous = Some(new_index); + + if Some(index.index) == self.head { + self.head = Some(new_index); + } + + if let Some(previous_index) = previous_index { + self.entries[previous_index.get()].occupied_mut().next = Some(new_index); + } + + Index::new(new_index, self.generation) + } + + /// Inserts the given value into the list with the assumption that it is currently empty. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + fn insert_empty(&mut self, value: T) -> Index { + let generation = self.generation; + let index = self.insert_new(value, None, None); + self.head = Some(index); + self.tail = Some(index); + Index::new(index, generation) + } + + /// Inserts the given value into the list with its expected previous and next value indices. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. 
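+  ///
+  /// An illustrative sketch of the slot reuse this performs (hypothetical, not a doctest):
+  ///
+  /// ```ignore
+  /// let index_1 = list.push_back('a');
+  /// let _ = list.remove(index_1);        // the slot becomes the new vacant head
+  /// let index_2 = list.push_back('b');   // `insert_new` reuses it under a new generation,
+  /// assert_eq!(list.get(index_1), None); // so the stale index stays invalid
+  /// ```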
+ fn insert_new( + &mut self, + value: T, + previous: Option, + next: Option, + ) -> NonMaxUsize { + self.length += 1; + + if self.length == usize::max_value() { + panic!("reached maximum possible length"); + } + + match self.vacant_head { + Some(index) => { + self.vacant_head = self.entries[index.get()].vacant_ref().next; + self.entries[index.get()] = + Entry::Occupied(OccupiedEntry::new(self.generation, previous, next, value)); + index + } + None => { + self.entries.push(Entry::Occupied(OccupiedEntry::new( + self.generation, + previous, + next, + value, + ))); + NonMaxUsize::new(self.entries.len() - 1).unwrap() + } + } + } + + /// Returns whether or not the list is empty. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert!(list.is_empty()); + /// + /// list.push_back(0); + /// assert!(!list.is_empty()); + /// ``` + #[must_use] + pub fn is_empty(&self) -> bool { + self.length == 0 + } + + /// Creates an iterator that yields immutable references to values in the list in order. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_back(0); + /// list.push_back(10); + /// list.push_back(200); + /// list.push_back(-10); + /// + /// let mut iter = list.iter(); + /// assert_eq!(iter.next(), Some(&0)); + /// assert_eq!(iter.next(), Some(&10)); + /// assert_eq!(iter.next(), Some(&200)); + /// assert_eq!(iter.next(), Some(&-10)); + /// assert_eq!(iter.next(), None); + /// ``` + #[must_use] + pub fn iter(&self) -> Iter<'_, T> { + Iter { + entries: &self.entries, + head: self.head, + remaining: self.length, + tail: self.tail, + } + } + + /// Creates an iterator that yields mutable references to values in the list in order. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_back(0); + /// list.push_back(10); + /// list.push_back(200); + /// list.push_back(-10); + /// + /// let mut iter = list.iter_mut(); + /// assert_eq!(iter.next(), Some(&mut 0)); + /// assert_eq!(iter.next(), Some(&mut 10)); + /// assert_eq!(iter.next(), Some(&mut 200)); + /// assert_eq!(iter.next(), Some(&mut -10)); + /// assert_eq!(iter.next(), None); + /// ``` + #[must_use] + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + IterMut { + entries: &mut self.entries, + head: self.head, + phantom: PhantomData, + remaining: self.length, + tail: self.tail, + } + } + + /// Returns the number of values in the list. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.len(), 0); + /// + /// list.push_back(0); + /// list.push_back(1); + /// list.push_back(2); + /// assert_eq!(list.len(), 3); + /// ``` + #[must_use] + pub fn len(&self) -> usize { + self.length + } + + /// Creates a new list with no initial capacity. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index = list.push_back(0); + /// assert_eq!(list.get(index), Some(&0)); + /// ``` + #[must_use] + pub fn new() -> Self { + VecList::default() + } + + /// Reorganizes the existing values to ensure maximum cache locality and shrinks the list such that the capacity is + /// exactly [`minimum_capacity`]. + /// + /// This function can be used to actually increase the capacity of the list. 
+ /// + /// Complexity: O(n) + /// + /// # Panics + /// + /// Panics if the given minimum capacity is less than the current length of the list. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index_1 = list.push_back(5); + /// let index_2 = list.push_back(10); + /// let index_3 = list.push_front(100); + /// list.remove(index_1); + /// + /// assert!(list.capacity() >= 3); + /// + /// let mut map = list.pack_to(list.len() + 5); + /// assert_eq!(list.capacity(), 7); + /// assert_eq!(map.len(), 2); + /// + /// let index_2 = map.remove(&index_2).unwrap(); + /// let index_3 = map.remove(&index_3).unwrap(); + /// + /// assert_eq!(list.get(index_2), Some(&10)); + /// assert_eq!(list.get(index_3), Some(&100)); + /// + /// let mut iter = list.iter(); + /// assert_eq!(iter.next(), Some(&100)); + /// assert_eq!(iter.next(), Some(&10)); + /// assert_eq!(iter.next(), None); + /// ``` + #[cfg(feature = "std")] + pub fn pack_to(&mut self, minimum_capacity: usize) -> HashMap, Index> { + assert!( + minimum_capacity >= self.length, + "cannot shrink to capacity lower than current length" + ); + + let mut count = NonMaxUsize::zero(); + let mut entries = Vec::with_capacity(minimum_capacity); + let generation = create_initial_generation(); + let length = self.length; + let mut map = HashMap::with_capacity(length); + let mut next_index = self.head; + + while let Some(index) = next_index { + let mut entry = self.remove_entry(index).expect("expected occupied entry"); + next_index = entry.next; + + let _ = map.insert( + Index::new(index, entry.generation), + Index::new(count, generation), + ); + + entry.generation = generation; + entry.previous = if count > 0 { + Some(count.checked_sub(1).unwrap()) + } else { + None + }; + entry.next = if count < length - 1 { + Some(count.checked_add(1).expect("overflow")) + } else { + None + }; + + entries.push(Entry::Occupied(entry)); + count = count.checked_add(1).expect("overflow"); + } + + self.entries = entries; + self.generation = generation; + self.length = length; + self.vacant_head = None; + + if self.length > 0 { + self.head = Some(NonMaxUsize::zero()); + // Safety: `self.length - 1` is always less than `usize::MAX`. + self.tail = Some(unsafe { NonMaxUsize::new_unchecked(length - 1) }); + } else { + self.head = None; + self.tail = None; + } + + map + } + + /// Reorganizes the existing values to ensure maximum cache locality and shrinks the list such that no additional + /// capacity exists. + /// + /// This is equivalent to calling [`VecList::pack_to`] with the current length. 
+ /// + /// Complexity: O(n) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index_1 = list.push_back(5); + /// let index_2 = list.push_back(10); + /// let index_3 = list.push_front(100); + /// list.remove(index_1); + /// + /// assert!(list.capacity() >= 3); + /// + /// let mut map = list.pack_to_fit(); + /// assert_eq!(list.capacity(), 2); + /// assert_eq!(map.len(), 2); + /// + /// let index_2 = map.remove(&index_2).unwrap(); + /// let index_3 = map.remove(&index_3).unwrap(); + /// + /// assert_eq!(list.get(index_2), Some(&10)); + /// assert_eq!(list.get(index_3), Some(&100)); + /// + /// let mut iter = list.iter(); + /// assert_eq!(iter.next(), Some(&100)); + /// assert_eq!(iter.next(), Some(&10)); + /// assert_eq!(iter.next(), None); + /// ``` + #[cfg(feature = "std")] + pub fn pack_to_fit(&mut self) -> HashMap, Index> { + self.pack_to(self.length) + } + + /// Removes and returns the value at the back of the list, if it exists. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.pop_back(), None); + /// + /// list.push_back(0); + /// list.push_back(1); + /// list.push_back(2); + /// assert_eq!(list.len(), 3); + /// + /// assert_eq!(list.pop_back(), Some(2)); + /// assert_eq!(list.len(), 2); + /// ``` + pub fn pop_back(&mut self) -> Option { + self.remove_entry(self.tail?).map(|entry| entry.value) + } + + /// Removes and returns the value at the front of the list, if it exists. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// assert_eq!(list.pop_front(), None); + /// + /// list.push_front(0); + /// list.push_front(1); + /// list.push_front(2); + /// assert_eq!(list.len(), 3); + /// + /// assert_eq!(list.pop_front(), Some(2)); + /// assert_eq!(list.len(), 2); + /// ``` + pub fn pop_front(&mut self) -> Option { + self.remove_entry(self.head?).map(|entry| entry.value) + } + + /// Inserts the given value to the back of the list. + /// + /// The index of the newly inserted value will be returned. + /// + /// Complexity: amortized O(1) + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index = list.push_back(0); + /// assert_eq!(list.get(index), Some(&0)); + /// ``` + pub fn push_back(&mut self, value: T) -> Index { + let tail_index = match self.tail { + Some(index) => index, + None => return self.insert_empty(value), + }; + let index = self.insert_new(value, Some(tail_index), None); + self.entries[tail_index.get()].occupied_mut().next = Some(index); + self.tail = Some(index); + Index::new(index, self.generation) + } + + /// Inserts the given value to the front of the list. + /// + /// The index of the newly inserted value will be returned. + /// + /// Complexity: amortized O(1) + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. 
+ /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index = list.push_front(0); + /// assert_eq!(list.get(index), Some(&0)); + /// ``` + pub fn push_front(&mut self, value: T) -> Index { + let head_index = match self.head { + Some(index) => index, + None => return self.insert_empty(value), + }; + let index = self.insert_new(value, None, Some(head_index)); + self.entries[head_index.get()].occupied_mut().previous = Some(index); + self.head = Some(index); + Index::new(index, self.generation) + } + + /// Removes and returns the value at the given index, if it exists. + /// + /// If the index refers to an index not in the list anymore or if the index has been invalidated, then [`None`] will + /// be returned and the list will be unaffected. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// let index = list.push_back(0); + /// assert_eq!(list.remove(index), Some(0)); + /// assert_eq!(list.remove(index), None); + /// ``` + pub fn remove(&mut self, index: Index) -> Option { + let (previous_index, next_index) = match &self.entries[index.index()] { + Entry::Occupied(entry) if entry.generation == index.generation => { + (entry.previous, entry.next) + } + _ => return None, + }; + Some( + self + .remove_helper(previous_index, index.index, next_index) + .value, + ) + } + + /// Removes and returns the entry at the given index, if it exists. + /// + /// If the index refers to an index not in the list anymore or if the index has been invalidated, then [`None`] will + /// be returned and the list will be unaffected. + fn remove_entry(&mut self, index: NonMaxUsize) -> Option> { + let (previous_index, next_index) = match &self.entries[index.get()] { + Entry::Occupied(entry) => (entry.previous, entry.next), + Entry::Vacant(_) => return None, + }; + Some(self.remove_helper(previous_index, index, next_index)) + } + + /// Removes and returns the entry at the given index with the entries previous and next index + /// values. + /// + /// It is assumed that there is an entry at the given index. + /// + /// # Panics + /// + /// Panics if called when the list is empty. Behavior is undefined if provided indices do not follow the expected + /// constraints. 
+ fn remove_helper( + &mut self, + previous_index: Option, + index: NonMaxUsize, + next_index: Option, + ) -> OccupiedEntry { + let head_index = self.head.expect("expected head index"); + let tail_index = self.tail.expect("expected tail index"); + let vacant_head = self.vacant_head; + let removed_entry = mem::replace( + &mut self.entries[index.get()], + Entry::Vacant(VacantEntry::new(vacant_head)), + ); + + self.generation = self.generation.wrapping_add(1); + self.length -= 1; + self.vacant_head = Some(index); + + if index == head_index && index == tail_index { + self.head = None; + self.tail = None; + } else if index == head_index { + self.entries[next_index.expect("expected next entry to exist").get()] + .occupied_mut() + .previous = None; + self.head = next_index; + } else if index == tail_index { + self.entries[previous_index + .expect("expected previous entry to exist") + .get()] + .occupied_mut() + .next = None; + self.tail = previous_index; + } else { + self.entries[next_index.expect("expected next entry to exist").get()] + .occupied_mut() + .previous = previous_index; + self.entries[previous_index + .expect("expected previous entry to exist") + .get()] + .occupied_mut() + .next = next_index; + } + + removed_entry.occupied() + } + + /// Reserves capacity for the given expected size increase. + /// + /// The collection may reserve more space to avoid frequent reallocations. After calling this function, capacity will + /// be greater than or equal to `self.len() + additional_capacity`. Does nothing if the current capacity is already + /// sufficient. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list: VecList = VecList::new(); + /// assert_eq!(list.capacity(), 0); + /// + /// list.reserve(10); + /// assert!(list.capacity() >= 10); + /// ``` + pub fn reserve(&mut self, additional_capacity: usize) { + self.entries.reserve(additional_capacity); + } + + /// Removes all elements from the list not satisfying the given predicate. + /// + /// Complexity: O(n) + /// + /// # Examples + /// + /// ``` + /// use dlv_list::VecList; + /// + /// let mut list = VecList::new(); + /// list.push_back(0); + /// list.push_back(-1); + /// list.push_back(1); + /// list.push_back(-2); + /// list.retain(|&mut value| value >= 0); + /// + /// let mut iter = list.iter(); + /// assert_eq!(iter.next(), Some(&0)); + /// assert_eq!(iter.next(), Some(&1)); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn retain(&mut self, mut predicate: Predicate) + where + Predicate: FnMut(&mut T) -> bool, + { + let mut next_index = self.head; + + while let Some(index) = next_index { + let entry = self.entries[index.get()].occupied_mut(); + next_index = entry.next; + + if !predicate(&mut entry.value) { + let _ = self.remove_entry(index); + } + } + } + + /// Creates a new list with the given capacity. 
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use dlv_list::VecList;
+  ///
+  /// let mut list: VecList<u32> = VecList::new();
+  /// assert_eq!(list.capacity(), 0);
+  ///
+  /// let mut list: VecList<u32> = VecList::with_capacity(10);
+  /// assert_eq!(list.capacity(), 10);
+  /// ```
+  #[must_use]
+  pub fn with_capacity(capacity: usize) -> Self {
+    VecList {
+      entries: Vec::with_capacity(capacity),
+      generation: create_initial_generation(),
+      head: None,
+      length: 0,
+      tail: None,
+      vacant_head: None,
+    }
+  }
+}
+
+impl<T> Debug for VecList<T>
+where
+  T: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.debug_list().entries(self.iter()).finish()
+  }
+}
+
+impl<T> Default for VecList<T> {
+  fn default() -> Self {
+    VecList {
+      entries: Vec::default(),
+      generation: create_initial_generation(),
+      head: None,
+      length: 0,
+      tail: None,
+      vacant_head: None,
+    }
+  }
+}
+
+impl<T> Eq for VecList<T> where T: Eq {}
+
+impl<T> Extend<T> for VecList<T> {
+  fn extend<Iter>(&mut self, iter: Iter)
+  where
+    Iter: IntoIterator<Item = T>,
+  {
+    let iter = iter.into_iter();
+    self.reserve(iter.size_hint().0);
+
+    for value in iter {
+      let _ = self.push_back(value);
+    }
+  }
+}
+
+impl<'a, T> Extend<&'a T> for VecList<T>
+where
+  T: 'a + Copy,
+{
+  fn extend<Iter>(&mut self, iter: Iter)
+  where
+    Iter: IntoIterator<Item = &'a T>,
+  {
+    self.extend(iter.into_iter().copied());
+  }
+}
+
+impl<T> FromIterator<T> for VecList<T> {
+  fn from_iter<Iter>(iter: Iter) -> Self
+  where
+    Iter: IntoIterator<Item = T>,
+  {
+    let mut list = VecList::new();
+    list.extend(iter);
+    list
+  }
+}
+
+impl<T> Hash for VecList<T>
+where
+  T: Hash,
+{
+  fn hash<StateHasher>(&self, state: &mut StateHasher)
+  where
+    StateHasher: Hasher,
+  {
+    self.len().hash(state);
+
+    for value in self {
+      value.hash(state);
+    }
+  }
+}
+
+impl<T> ops::Index<Index<T>> for VecList<T> {
+  type Output = T;
+
+  fn index(&self, index: Index<T>) -> &Self::Output {
+    self.get(index).expect("expected entry at index")
+  }
+}
+
+impl<T> ops::IndexMut<Index<T>> for VecList<T> {
+  fn index_mut(&mut self, index: Index<T>) -> &mut Self::Output {
+    self.get_mut(index).expect("expected entry at index")
+  }
+}
+
+impl<T> IntoIterator for VecList<T> {
+  type IntoIter = IntoIter<T>;
+  type Item = T;
+
+  fn into_iter(self) -> Self::IntoIter {
+    IntoIter {
+      head: self.head,
+      remaining: self.length,
+      tail: self.tail,
+      list: self,
+    }
+  }
+}
+
+impl<'a, T> IntoIterator for &'a VecList<T> {
+  type IntoIter = Iter<'a, T>;
+  type Item = &'a T;
+
+  fn into_iter(self) -> Self::IntoIter {
+    Iter {
+      entries: &self.entries,
+      head: self.head,
+      remaining: self.length,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<'a, T> IntoIterator for &'a mut VecList<T> {
+  type IntoIter = IterMut<'a, T>;
+  type Item = &'a mut T;
+
+  fn into_iter(self) -> Self::IntoIter {
+    IterMut {
+      entries: &mut self.entries,
+      head: self.head,
+      phantom: PhantomData,
+      remaining: self.length,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<T> Ord for VecList<T>
+where
+  T: Ord,
+{
+  fn cmp(&self, other: &Self) -> Ordering {
+    self.iter().cmp(other)
+  }
+}
+
+impl<T> PartialEq for VecList<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &Self) -> bool {
+    self.len() == other.len() && self.iter().eq(other)
+  }
+}
+
+impl<T> PartialEq<LinkedList<T>> for VecList<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &LinkedList<T>) -> bool {
+    self.len() == other.len() && self.iter().eq(other)
+  }
+}
+
+impl<T> PartialEq<VecList<T>> for LinkedList<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &VecList<T>) -> bool {
+    other == self
+  }
+}
+
+impl<T> PartialEq<Vec<T>> for VecList<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &Vec<T>) -> bool {
+    self.len() == other.len() && self.iter().eq(other)
+  }
+}
+
+impl<T> PartialEq<VecList<T>> for Vec<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &VecList<T>) -> bool {
+    other == self
+  }
+}
+
+impl<T, const N: usize> PartialEq<[T; N]> for VecList<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &[T; N]) -> bool {
+    self.len() == other.len() && self.iter().eq(other.iter())
+  }
+}
+
+impl<T, const N: usize> PartialEq<VecList<T>> for [T; N]
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &VecList<T>) -> bool {
+    other == self
+  }
+}
+
+impl<'a, T> PartialEq<&'a [T]> for VecList<T>
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &&'a [T]) -> bool {
+    self.len() == other.len() && self.iter().eq(other.iter())
+  }
+}
+
+impl<T> PartialEq<VecList<T>> for &'_ [T]
+where
+  T: PartialEq,
+{
+  fn eq(&self, other: &VecList<T>) -> bool {
+    other == self
+  }
+}
+
+impl<T> PartialOrd for VecList<T>
+where
+  T: PartialOrd,
+{
+  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+    self.iter().partial_cmp(other)
+  }
+}
+
+/// A wrapper type that indicates an index into the list.
+///
+/// This index may be invalidated by operations on the list itself.
+pub struct Index<T> {
+  /// The generation of the entry currently at this index. This is used to avoid the ABA problem.
+  generation: u64,
+
+  /// The actual index into the entry list.
+  index: NonMaxUsize,
+
+  /// This type is parameterized on the entry data type to avoid indices being used across differently typed lists.
+  phantom: PhantomData<T>,
+}
+
+impl<T> Clone for Index<T> {
+  fn clone(&self) -> Self {
+    *self
+  }
+}
+
+impl<T> Copy for Index<T> {}
+
+impl<T> Debug for Index<T> {
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter
+      .debug_tuple("Index")
+      .field(&self.index)
+      .field(&self.generation)
+      .finish()
+  }
+}
+
+impl<T> Eq for Index<T> {}
+
+impl<T> Hash for Index<T> {
+  fn hash<StateHasher>(&self, hasher: &mut StateHasher)
+  where
+    StateHasher: Hasher,
+  {
+    self.index.hash(hasher);
+    self.generation.hash(hasher);
+  }
+}
+
+impl<T> PartialEq for Index<T> {
+  fn eq(&self, other: &Self) -> bool {
+    self.generation == other.generation && self.index == other.index
+  }
+}
+
+impl<T> Index<T> {
+  /// Convenience function for creating a new index.
+  #[must_use]
+  pub(self) fn new(index: NonMaxUsize, generation: u64) -> Index<T> {
+    Index {
+      generation,
+      index,
+      phantom: PhantomData,
+    }
+  }
+
+  /// Gets the index as a `usize`.
+  #[inline]
+  pub(self) fn index(&self) -> usize {
+    self.index.get()
+  }
+}
+
+/// An entry in the list. This can be either occupied or vacant.
+#[derive(Clone)]
+enum Entry<T> {
+  /// An occupied entry contains actual entry data inserted by the user.
+  Occupied(OccupiedEntry<T>),
+
+  /// A vacant entry is one that can be reused.
+  Vacant(VacantEntry),
+}
+
+impl<T> Entry<T> {
+  /// Returns the occupied entry by moving it out of the entry.
+  ///
+  /// # Panics
+  ///
+  /// Panics if the variant is actually [`Entry::Vacant`].
+  #[must_use]
+  pub fn occupied(self) -> OccupiedEntry<T> {
+    match self {
+      Entry::Occupied(entry) => entry,
+      Entry::Vacant(_) => panic!("expected occupied entry"),
+    }
+  }
+
+  /// Returns an immutable reference to the occupied entry.
+  ///
+  /// # Panics
+  ///
+  /// Panics if the variant is actually [`Entry::Vacant`].
+  #[must_use]
+  pub fn occupied_ref(&self) -> &OccupiedEntry<T> {
+    match self {
+      Entry::Occupied(entry) => entry,
+      Entry::Vacant(_) => panic!("expected occupied entry"),
+    }
+  }
+
+  /// Returns a mutable reference to the occupied entry.
+  ///
+  /// # Panics
+  ///
+  /// Panics if the variant is actually [`Entry::Vacant`].
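+  ///
+  /// Reaching this panic should indicate a broken internal invariant rather than a caller
+  /// error: the list's links between occupied entries are maintained to point only at
+  /// occupied slots.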
+  #[must_use]
+  pub fn occupied_mut(&mut self) -> &mut OccupiedEntry<T> {
+    match self {
+      Entry::Occupied(entry) => entry,
+      Entry::Vacant(_) => panic!("expected occupied entry"),
+    }
+  }
+
+  /// Returns an immutable reference to the vacant entry.
+  ///
+  /// # Panics
+  ///
+  /// Panics if the variant is actually [`Entry::Occupied`].
+  #[must_use]
+  pub fn vacant_ref(&self) -> &VacantEntry {
+    match self {
+      Entry::Vacant(entry) => entry,
+      Entry::Occupied(_) => panic!("expected vacant entry"),
+    }
+  }
+}
+
+/// An occupied entry in the list.
+#[derive(Clone)]
+struct OccupiedEntry<T> {
+  /// The generation of when this entry was inserted. This is used to avoid the ABA problem.
+  generation: u64,
+
+  /// The index of the next occupied entry in the list.
+  next: Option<NonMaxUsize>,
+
+  /// The index of the previous occupied entry in the list.
+  previous: Option<NonMaxUsize>,
+
+  /// The actual value being stored in this entry.
+  value: T,
+}
+
+impl<T> OccupiedEntry<T> {
+  /// Convenience function for creating a new occupied entry.
+  #[must_use]
+  pub fn new(
+    generation: u64,
+    previous: Option<NonMaxUsize>,
+    next: Option<NonMaxUsize>,
+    value: T,
+  ) -> OccupiedEntry<T> {
+    OccupiedEntry {
+      generation,
+      next,
+      previous,
+      value,
+    }
+  }
+}
+
+/// A vacant entry in the list.
+#[derive(Clone, Debug)]
+struct VacantEntry {
+  /// The index of the next vacant entry in the list.
+  next: Option<NonMaxUsize>,
+}
+
+impl VacantEntry {
+  /// Convenience function for creating a new vacant entry.
+  #[must_use]
+  pub fn new(next: Option<NonMaxUsize>) -> VacantEntry {
+    VacantEntry { next }
+  }
+}
+
+/// An iterator that yields and removes all entries from the list.
+pub struct Drain<'a, T> {
+  /// The index of the head of the unvisited portion of the list.
+  head: Option<NonMaxUsize>,
+
+  /// A reference to the entry list.
+  list: &'a mut VecList<T>,
+
+  /// The number of entries that have not been visited.
+  remaining: usize,
+
+  /// The index of the tail of the unvisited portion of the list.
+  tail: Option<NonMaxUsize>,
+}
+
+impl<T> Drain<'_, T> {
+  /// Creates an iterator that yields immutable references to entries in the list.
+  #[must_use]
+  pub fn iter(&self) -> Iter<'_, T> {
+    Iter {
+      entries: &self.list.entries,
+      head: self.head,
+      remaining: self.remaining,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<T> Debug for Drain<'_, T>
+where
+  T: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("Drain(")?;
+    formatter.debug_list().entries(self.iter()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.tail.map(|index| {
+        let entry = self
+          .list
+          .remove_entry(index)
+          .expect("expected occupied entry");
+        self.tail = entry.previous;
+        self.remaining -= 1;
+        entry.value
+      })
+    }
+  }
+}
+
+impl<T> Drop for Drain<'_, T> {
+  fn drop(&mut self) {
+    self.list.clear();
+  }
+}
+
+impl<T> ExactSizeIterator for Drain<'_, T> {}
+
+impl<T> FusedIterator for Drain<'_, T> {}
+
+impl<T> Iterator for Drain<'_, T> {
+  type Item = T;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.head.map(|index| {
+        let entry = self
+          .list
+          .remove_entry(index)
+          .expect("expected occupied entry");
+        self.head = entry.next;
+        self.remaining -= 1;
+        entry.value
+      })
+    }
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    (self.remaining, Some(self.remaining))
+  }
+}
+
+/// An iterator that yields all indices in the list.
+pub struct Indices<'a, T> {
+  /// A reference to the actual storage for the entry list.
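+  /// Borrowing the storage directly (rather than the whole list) is what lets this iterator
+  /// read each entry's generation and hand back full [`Index`] values, as done in the
+  /// `Iterator` implementation below.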
+  entries: &'a Vec<Entry<T>>,
+
+  /// The index of the head of the unvisited portion of the list.
+  head: Option<NonMaxUsize>,
+
+  /// The number of entries that have not been visited.
+  remaining: usize,
+
+  /// The index of the tail of the unvisited portion of the list.
+  tail: Option<NonMaxUsize>,
+}
+
+impl<T> Clone for Indices<'_, T> {
+  fn clone(&self) -> Self {
+    Indices {
+      entries: self.entries,
+      head: self.head,
+      remaining: self.remaining,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<T> Debug for Indices<'_, T>
+where
+  T: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("Indices(")?;
+    formatter.debug_list().entries(self.clone()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<T> DoubleEndedIterator for Indices<'_, T> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.tail.map(|index| {
+        let entry = self.entries[index.get()].occupied_ref();
+        let index = Index::new(index, entry.generation);
+        self.tail = entry.previous;
+        self.remaining -= 1;
+        index
+      })
+    }
+  }
+}
+
+impl<T> ExactSizeIterator for Indices<'_, T> {}
+
+impl<T> FusedIterator for Indices<'_, T> {}
+
+impl<T> Iterator for Indices<'_, T> {
+  type Item = Index<T>;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.head.map(|index| {
+        let entry = self.entries[index.get()].occupied_ref();
+        let index = Index::new(index, entry.generation);
+        self.head = entry.next;
+        self.remaining -= 1;
+        index
+      })
+    }
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    (self.remaining, Some(self.remaining))
+  }
+}
+
+/// An iterator that moves all entries out of the entry list.
+#[derive(Clone)]
+pub struct IntoIter<T> {
+  /// The index of the head of the unvisited portion of the list.
+  head: Option<NonMaxUsize>,
+
+  /// The entry list from which entries are yielded.
+  list: VecList<T>,
+
+  /// The number of entries that have not been visited.
+  remaining: usize,
+
+  /// The index of the tail of the unvisited portion of the list.
+  tail: Option<NonMaxUsize>,
+}
+
+impl<T> IntoIter<T> {
+  /// Creates an iterator that yields immutable references to entries in the list.
+  #[must_use]
+  pub fn iter(&self) -> Iter<'_, T> {
+    Iter {
+      entries: &self.list.entries,
+      head: self.head,
+      remaining: self.remaining,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<T> Debug for IntoIter<T>
+where
+  T: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("IntoIter(")?;
+    formatter.debug_list().entries(self.iter()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<T> DoubleEndedIterator for IntoIter<T> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.tail.map(|index| {
+        let entry = self
+          .list
+          .remove_entry(index)
+          .expect("expected occupied entry");
+        self.tail = entry.previous;
+        self.remaining -= 1;
+        entry.value
+      })
+    }
+  }
+}
+
+impl<T> ExactSizeIterator for IntoIter<T> {}
+
+impl<T> FusedIterator for IntoIter<T> {}
+
+impl<T> Iterator for IntoIter<T> {
+  type Item = T;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.head.map(|index| {
+        let entry = self
+          .list
+          .remove_entry(index)
+          .expect("expected occupied entry");
+        self.head = entry.next;
+        self.remaining -= 1;
+        entry.value
+      })
+    }
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    (self.remaining, Some(self.remaining))
+  }
+}
+
+/// An iterator that yields immutable references to entries in the list.
+pub struct Iter<'a, T> {
+  /// A reference to the actual storage for the entry list.
+  entries: &'a Vec<Entry<T>>,
+
+  /// The index of the head of the unvisited portion of the list.
+  head: Option<NonMaxUsize>,
+
+  /// The number of entries that have not been visited.
+  remaining: usize,
+
+  /// The index of the tail of the unvisited portion of the list.
+  tail: Option<NonMaxUsize>,
+}
+
+impl<'a, T> Clone for Iter<'a, T> {
+  fn clone(&self) -> Iter<'a, T> {
+    Iter {
+      entries: self.entries,
+      head: self.head,
+      remaining: self.remaining,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<T> Debug for Iter<'_, T>
+where
+  T: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("Iter(")?;
+    formatter.debug_list().entries(self.clone()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<T> DoubleEndedIterator for Iter<'_, T> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.tail.map(|index| {
+        let entry = self.entries[index.get()].occupied_ref();
+        self.tail = entry.previous;
+        self.remaining -= 1;
+        &entry.value
+      })
+    }
+  }
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+  type Item = &'a T;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.head.map(|index| {
+        let entry = self.entries[index.get()].occupied_ref();
+        self.head = entry.next;
+        self.remaining -= 1;
+        &entry.value
+      })
+    }
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    (self.remaining, Some(self.remaining))
+  }
+}
+
+/// An iterator that yields mutable references to entries in the list.
+pub struct IterMut<'a, T> {
+  /// A pointer to the actual storage for the entry list.
+  entries: *mut Vec<Entry<T>>,
+
+  /// The index of the head of the unvisited portion of the list.
+  head: Option<NonMaxUsize>,
+
+  /// Because [`IterMut::entries`] is a pointer, we need to have a phantom data here for the lifetime parameter.
+  phantom: PhantomData<&'a mut Vec<Entry<T>>>,
+
+  /// The number of entries that have not been visited.
+  remaining: usize,
+
+  /// The index of the tail of the unvisited portion of the list.
+  tail: Option<NonMaxUsize>,
+}
+
+impl<T> IterMut<'_, T> {
+  /// Creates an iterator that yields immutable references to entries in the list.
+  #[must_use]
+  pub fn iter(&self) -> Iter<'_, T> {
+    Iter {
+      entries: unsafe { &*self.entries },
+      head: self.head,
+      remaining: self.remaining,
+      tail: self.tail,
+    }
+  }
+}
+
+impl<T> Debug for IterMut<'_, T>
+where
+  T: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("IterMut(")?;
+    formatter.debug_list().entries(self.iter()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<T> DoubleEndedIterator for IterMut<'_, T> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.tail.map(|index| {
+        let entry = unsafe { &mut (*self.entries)[index.get()] }.occupied_mut();
+        self.tail = entry.previous;
+        self.remaining -= 1;
+        &mut entry.value
+      })
+    }
+  }
+}
+
+impl<T> ExactSizeIterator for IterMut<'_, T> {}
+
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+  type Item = &'a mut T;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    if self.remaining == 0 {
+      None
+    } else {
+      self.head.map(|index| {
+        let entry = unsafe { &mut (*self.entries)[index.get()] }.occupied_mut();
+        self.head = entry.next;
+        self.remaining -= 1;
+        &mut entry.value
+      })
+    }
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    (self.remaining, Some(self.remaining))
+  }
+}
+
+unsafe impl<T> Send for IterMut<'_, T> where T: Send {}
+
+unsafe impl<T> Sync for IterMut<'_, T> where T: Sync {}
+
+/// Creates the initial generation, seeded randomly (from `RandomState` when `std` is
+/// available, or a compile-time-seeded xorshift generator otherwise).
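+///
+/// The generation counter is what lets a stale [`Index`] be detected: each removal bumps the
+/// list's generation, so an index taken earlier no longer matches whatever later occupies the
+/// same slot. A minimal illustration of that behaviour, using only the public `VecList` API
+/// shown above:
+///
+/// ```
+/// use dlv_list::VecList;
+///
+/// let mut list = VecList::new();
+/// let index = list.push_back(0);
+/// list.remove(index);
+/// // The vacated slot may be reused, but under a new generation...
+/// let _ = list.push_back(1);
+/// // ...so the stale index is rejected instead of aliasing the new element.
+/// assert_eq!(list.get(index), None);
+/// ```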
+#[must_use] +fn create_initial_generation() -> u64 { + #[cfg(feature = "std")] + { + use std::{collections::hash_map::RandomState, hash::BuildHasher}; + + let mut hasher = RandomState::new().build_hasher(); + hasher.write_u32(0); + hasher.finish() + } + + #[cfg(not(feature = "std"))] + { + use core::sync::atomic::{AtomicU32, Ordering}; + + // Generate a u32 randomly. + #[cfg_attr(mutants, mutants::skip)] + fn gen_u32() -> u32 { + static SEED: AtomicU32 = AtomicU32::new({ + // Random seed generated at compile time. + const_random::const_random!(u32) + }); + + // Xorshift is "good enough" in most cases. + let mut x = SEED.load(Ordering::Relaxed); + + loop { + let mut random = x; + random ^= random << 13; + random ^= random >> 17; + random ^= random << 5; + + // Put the new seed in. + if let Err(actual) = SEED.compare_exchange(x, random, Ordering::SeqCst, Ordering::SeqCst) { + x = actual; + } else { + return random; + } + } + } + + // Put two u32's together + gen_u32() as u64 | ((gen_u32() as u64) << 32) + } +} + +#[allow(unused_results)] +#[cfg(test)] +mod test { + use coverage_helper::test; + + use super::*; + use alloc::{format, vec}; + + #[cfg(feature = "std")] + use std::{collections::hash_map::RandomState, hash::BuildHasher}; + + #[test] + fn test_bounds() { + fn check_bounds() {} + + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + } + + #[cfg(feature = "std")] + #[test] + fn test_non_max_usize_eq() { + let zero = NonMaxUsize::zero(); + assert_eq!(zero, 0usize); + assert_ne!(zero, 1usize); + } + + #[test] + fn test_drain_debug() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let drain = list.drain(); + assert_eq!(format!("{drain:?}"), "Drain([0, 1, -1, 2, -2])"); + } + + #[test] + fn test_drain_double_ended() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut drain = list.drain(); + assert_eq!(drain.next(), Some(0)); + assert_eq!(drain.next_back(), Some(-2)); + assert_eq!(drain.next(), Some(1)); + assert_eq!(drain.next_back(), Some(2)); + assert_eq!(drain.next(), Some(-1)); + assert_eq!(drain.next_back(), None); + } + + #[test] + fn test_drain_empty() { + let mut list: VecList = VecList::new(); + let mut drain = list.drain(); + assert_eq!(drain.next(), None); + } + + #[test] + fn test_drain_fused() { + let mut list: VecList = VecList::new(); + list.push_back(0); + let mut drain = list.drain(); + assert_eq!(drain.next(), Some(0)); + assert_eq!(drain.next(), None); + assert_eq!(drain.next(), None); + assert_eq!(drain.next(), None); + } + + #[test] + fn test_drain_size_hint() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut drain = list.drain(); + + assert_eq!(drain.size_hint(), (5, Some(5))); + drain.next(); + assert_eq!(drain.size_hint(), (4, Some(4))); + drain.next(); + assert_eq!(drain.size_hint(), (3, Some(3))); + drain.next(); + assert_eq!(drain.size_hint(), (2, Some(2))); + drain.next(); + assert_eq!(drain.size_hint(), (1, Some(1))); + drain.next(); + assert_eq!(drain.size_hint(), (0, Some(0))); + } + + #[test] + fn test_index_debug() { + let mut list = VecList::new(); + let index = list.push_back(5); + + assert_eq!( + format!("{index:?}"), + format!("Index(0, {})", index.generation) 
+ ); + } + + #[test] + fn test_index_equality() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.indices().next().unwrap(); + assert_eq!(index_1, index_2); + + let index_3 = list.push_back(1); + assert_ne!(index_1, index_3); + } + + #[cfg(feature = "std")] + #[test] + fn test_index_hash() { + let state = RandomState::new(); + + fn hash(state: &RandomState, value: &Index) -> u64 { + let mut hasher = state.build_hasher(); + value.hash(&mut hasher); + hasher.finish() + } + + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(2); + + assert_eq!(hash(&state, &index_1), hash(&state, &index_1)); + assert_ne!(hash(&state, &index_1), hash(&state, &index_2)); + } + + #[test] + fn test_indices_debug() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let indices = list.indices(); + assert_eq!( + format!("{indices:?}"), + format!( + "Indices([Index(0, {}), Index(1, {}), Index(2, {}), Index(3, {}), Index(4, {})])", + list.generation, list.generation, list.generation, list.generation, list.generation + ) + ); + } + + #[test] + fn test_indices_double_ended() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut indices = list.indices(); + assert_eq!(indices.next().unwrap().index.get(), 0); + assert_eq!(indices.next_back().unwrap().index.get(), 4); + assert_eq!(indices.next().unwrap().index.get(), 1); + assert_eq!(indices.next_back().unwrap().index.get(), 3); + assert_eq!(indices.next().unwrap().index.get(), 2); + assert_eq!(indices.next_back(), None); + } + + #[test] + fn test_indices_empty() { + let list: VecList = VecList::new(); + let mut indices = list.indices(); + assert_eq!(indices.next(), None); + } + + #[test] + fn test_indices_fused() { + let mut list: VecList = VecList::new(); + list.push_back(0); + let mut indices = list.indices(); + assert_eq!(indices.next().unwrap().index.get(), 0); + assert_eq!(indices.next(), None); + assert_eq!(indices.next(), None); + assert_eq!(indices.next(), None); + } + + #[test] + fn test_indices_size_hint() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut indices = list.indices(); + + assert_eq!(indices.size_hint(), (5, Some(5))); + indices.next(); + assert_eq!(indices.size_hint(), (4, Some(4))); + indices.next(); + assert_eq!(indices.size_hint(), (3, Some(3))); + indices.next(); + assert_eq!(indices.size_hint(), (2, Some(2))); + indices.next(); + assert_eq!(indices.size_hint(), (1, Some(1))); + indices.next(); + assert_eq!(indices.size_hint(), (0, Some(0))); + } + + #[test] + fn test_into_iter_debug() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let iter = list.into_iter(); + assert_eq!(format!("{iter:?}"), "IntoIter([0, 1, -1, 2, -2])"); + } + + #[test] + fn test_into_iter_double_ended() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut iter = list.into_iter(); + assert_eq!(iter.next(), Some(0)); + assert_eq!(iter.next_back(), Some(-2)); + assert_eq!(iter.next(), Some(1)); + assert_eq!(iter.next_back(), Some(2)); + assert_eq!(iter.next(), Some(-1)); + assert_eq!(iter.next_back(), 
None); + } + + #[test] + fn test_into_iter_empty() { + let list: VecList = VecList::new(); + let mut iter = list.into_iter(); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_into_iter_fused() { + let mut list: VecList = VecList::new(); + list.push_back(0); + let mut iter = list.into_iter(); + assert_eq!(iter.next(), Some(0)); + assert_eq!(iter.next(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_into_iter_size_hint() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut iter = list.into_iter(); + + assert_eq!(iter.size_hint(), (5, Some(5))); + iter.next(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_iter_debug() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let iter = list.iter(); + assert_eq!(format!("{iter:?}"), "Iter([0, 1, -1, 2, -2])"); + } + + #[test] + fn test_iter_double_ended() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut iter = list.iter(); + assert_eq!(iter.next(), Some(&0)); + assert_eq!(iter.next_back(), Some(&-2)); + assert_eq!(iter.next(), Some(&1)); + assert_eq!(iter.next_back(), Some(&2)); + assert_eq!(iter.next(), Some(&-1)); + assert_eq!(iter.next_back(), None); + } + + #[test] + fn test_iter_empty() { + let list: VecList = VecList::new(); + let mut iter = list.iter(); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_fused() { + let mut list: VecList = VecList::new(); + list.push_back(0); + let mut iter = list.iter(); + assert_eq!(iter.next(), Some(&0)); + assert_eq!(iter.next(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_size_hint() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut iter = list.iter(); + + assert_eq!(iter.size_hint(), (5, Some(5))); + iter.next(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_iter_mut_debug() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let iter = list.iter_mut(); + assert_eq!(format!("{iter:?}"), "IterMut([0, 1, -1, 2, -2])"); + } + + #[test] + fn test_iter_mut_double_ended() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut iter = list.iter_mut(); + assert_eq!(iter.next(), Some(&mut 0)); + assert_eq!(iter.next_back(), Some(&mut -2)); + assert_eq!(iter.next(), Some(&mut 1)); + assert_eq!(iter.next_back(), Some(&mut 2)); + assert_eq!(iter.next(), Some(&mut -1)); + assert_eq!(iter.next_back(), None); + } + + #[test] + fn test_iter_mut_empty() { + let mut list: 
VecList = VecList::new(); + let mut iter = list.iter_mut(); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_mut_fused() { + let mut list: VecList = VecList::new(); + list.push_back(0); + let mut iter = list.iter_mut(); + assert_eq!(iter.next(), Some(&mut 0)); + assert_eq!(iter.next(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_mut_size_hint() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + let mut iter = list.iter_mut(); + + assert_eq!(iter.size_hint(), (5, Some(5))); + iter.next(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_vec_list_back() { + let mut list = VecList::new(); + assert_eq!(list.back(), None); + + let index_1 = list.push_back(0); + assert_eq!(list.back(), Some(&0)); + + let index_2 = list.push_back(1); + assert_eq!(list.back(), Some(&1)); + + list.remove(index_2); + assert_eq!(list.back(), Some(&0)); + + list.remove(index_1); + assert_eq!(list.back(), None); + } + + #[test] + fn test_vec_list_back_mut() { + let mut list = VecList::new(); + assert_eq!(list.back_mut(), None); + + let index_1 = list.push_back(0); + assert_eq!(list.back_mut(), Some(&mut 0)); + + let index_2 = list.push_back(1); + assert_eq!(list.back_mut(), Some(&mut 1)); + + list.remove(index_2); + assert_eq!(list.back_mut(), Some(&mut 0)); + + list.remove(index_1); + assert_eq!(list.back_mut(), None); + } + + #[test] + fn test_vec_list_capacity() { + let list: VecList = VecList::new(); + assert_eq!(list.capacity(), 0); + } + + #[test] + fn test_vec_list_clear() { + let mut list = VecList::new(); + let index = list.push_back(0); + list.clear(); + assert!(list.is_empty()); + assert_eq!(list.get(index), None); + } + + #[test] + fn test_vec_list_contains() { + let mut list = VecList::new(); + assert!(!list.contains(&0)); + + let index = list.push_back(0); + assert!(list.contains(&0)); + + list.remove(index); + assert!(!list.contains(&0)); + } + + #[test] + fn test_vec_list_drain() { + let mut list = VecList::new(); + list.drain(); + assert!(list.is_empty()); + + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.drain(); + assert!(list.is_empty()); + } + + #[test] + fn test_vec_list_debug() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + assert_eq!(format!("{list:?}"), "[0, 1, -1, 2, -2]"); + } + + #[test] + fn test_vec_list_equality() { + let mut list_1 = VecList::new(); + list_1.push_back(0); + list_1.push_back(1); + list_1.push_back(-1); + list_1.push_back(2); + list_1.push_back(-2); + + assert_eq!(list_1, Vec::from_iter([0, 1, -1, 2, -2])); + assert_eq!(Vec::from_iter([0, 1, -1, 2, -2]), list_1); + assert_ne!(list_1, Vec::new()); + assert_ne!(Vec::new(), list_1); + + assert_eq!(list_1, LinkedList::from_iter([0, 1, -1, 2, -2])); + assert_eq!(LinkedList::from_iter([0, 1, -1, 2, -2]), list_1); + assert_ne!(list_1, LinkedList::new()); + assert_ne!(LinkedList::new(), list_1); + + assert_eq!(list_1, [0, 1, -1, 2, -2]); + assert_eq!([0, 1, -1, 2, -2], list_1); + assert_ne!(list_1, []); + assert_ne!([], list_1); + + assert_eq!(list_1, [0, 1, -1, 2, -2].as_slice()); 
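+    // Each of these comparisons also works with the operands flipped, via the mirrored
+    // `PartialEq` impls defined for `Vec`, `LinkedList`, arrays, and slices above.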
+ assert_eq!([0, 1, -1, 2, -2].as_slice(), list_1); + assert_ne!(list_1, [].as_slice()); + assert_ne!([].as_slice(), list_1); + + let mut list_2 = list_1.clone(); + list_2.pop_back(); + assert_ne!(list_1, list_2); + + list_2.push_back(-2); + assert_eq!(list_1, list_2); + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_hash() { + let state = RandomState::new(); + fn hash(state: &RandomState, value: &VecList) -> u64 { + let mut hasher = state.build_hasher(); + value.hash(&mut hasher); + hasher.finish() + } + + let mut list_1 = VecList::new(); + list_1.push_back(0); + + let list_2 = VecList::new(); + + assert_eq!(hash(&state, &list_1), hash(&state, &list_1)); + assert_ne!(hash(&state, &list_1), hash(&state, &list_2)); + } + + #[test] + fn test_vec_list_extend() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.extend([-1, 2, -2].iter()); + + assert_eq!(list, &[0, 1, -1, 2, -2][..]); + } + + #[test] + fn test_vec_list_from_iterator() { + let list = VecList::from_iter([0, 1, -1, 2, -2].iter().cloned()); + assert_eq!(list, &[0, 1, -1, 2, -2][..]); + } + + #[test] + fn test_vec_list_front() { + let mut list = VecList::new(); + assert_eq!(list.front(), None); + + let index_1 = list.push_front(0); + assert_eq!(list.front(), Some(&0)); + + let index_2 = list.push_front(1); + assert_eq!(list.front(), Some(&1)); + + list.remove(index_2); + assert_eq!(list.front(), Some(&0)); + + list.remove(index_1); + assert_eq!(list.front(), None); + } + + #[test] + fn test_vec_list_front_mut() { + let mut list = VecList::new(); + assert_eq!(list.front_mut(), None); + + let index_1 = list.push_front(0); + assert_eq!(list.front_mut(), Some(&mut 0)); + + let index_2 = list.push_front(1); + assert_eq!(list.front_mut(), Some(&mut 1)); + + list.remove(index_2); + assert_eq!(list.front_mut(), Some(&mut 0)); + + list.remove(index_1); + assert_eq!(list.front_mut(), None); + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_get() { + let mut list = VecList::new(); + let index = list.push_back(0); + assert_eq!(list.get(index), Some(&0)); + list.remove(index); + assert_eq!(list.get(index), None); + + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + + list.remove(index_1); + list.pack_to_fit(); + assert_eq!(list.get(index_1), None); + assert_eq!(list.get(index_2), None); + assert_eq!(list.get(index_3), None); + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_get_mut() { + let mut list = VecList::new(); + let index = list.push_back(0); + assert_eq!(list.get_mut(index), Some(&mut 0)); + list.remove(index); + assert_eq!(list.get_mut(index), None); + + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + + list.remove(index_1); + list.pack_to_fit(); + assert_eq!(list.get_mut(index_1), None); + assert_eq!(list.get_mut(index_2), None); + assert_eq!(list.get_mut(index_3), None); + } + + #[test] + fn test_vec_list_get_unchecked() { + let mut list = VecList::new(); + let index = list.push_back(0); + assert_eq!(unsafe { list.get_unchecked(index) }, &0); + + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + + list.remove(index_1); + assert_eq!(unsafe { list.get_unchecked(index_2) }, &1); + assert_eq!(unsafe { list.get_unchecked(index_3) }, &2); + } + + #[test] + fn test_vec_list_get_unchecked_mut() { + let mut list = 
VecList::new(); + let index = list.push_back(0); + assert_eq!(unsafe { list.get_unchecked_mut(index) }, &mut 0); + + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + + list.remove(index_1); + assert_eq!(unsafe { list.get_unchecked_mut(index_2) }, &mut 1); + assert_eq!(unsafe { list.get_unchecked_mut(index_3) }, &mut 2); + } + + #[test] + fn test_vec_list_get_next_index() { + let mut list = VecList::new(); + + let index = list.push_back(0); + assert_eq!(list.get_next_index(index), None); + + list.push_back(1); + assert_eq!(list.get_next_index(index).unwrap().index.get(), 1); + } + + #[test] + fn test_vec_list_get_previous_index() { + let mut list = VecList::new(); + + let index = list.push_front(0); + assert_eq!(list.get_previous_index(index), None); + + list.push_front(1); + assert_eq!(list.get_previous_index(index).unwrap().index.get(), 1); + } + + #[test] + fn test_vec_list_index() { + let mut list = VecList::new(); + + let index = list.push_back(5); + assert_eq!(list[index], 5); + + list[index] = 10; + assert_eq!(list[index], 10); + } + + #[should_panic] + #[test] + fn test_vec_list_index_panic() { + let mut list = VecList::new(); + let index = list.push_back(0); + list.pop_back(); + let _ = list[index]; + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_indices() { + let mut list = VecList::new(); + let mut iter = list.indices(); + assert_eq!(iter.next(), None); + + list.push_back(0); + let index = list.push_back(1); + list.push_back(-1); + list.remove(index); + + let mut iter = list.indices(); + assert_eq!(iter.next().unwrap().index.get(), 0); + assert_eq!(iter.next().unwrap().index.get(), 2); + assert_eq!(iter.next(), None); + + list.pack_to_fit(); + + let mut iter = list.indices(); + assert_eq!(iter.next().unwrap().index.get(), 0); + assert_eq!(iter.next().unwrap().index.get(), 1); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_vec_list_insert_after() { + let mut list = VecList::new(); + let index_1 = list.push_front(0); + let index_2 = list.insert_after(index_1, 1); + + assert_eq!(list.back(), Some(&1)); + assert_eq!(list.get_previous_index(index_2), Some(index_1)); + assert_eq!(list.get_next_index(index_1), Some(index_2)); + + let index_3 = list.insert_after(index_1, 2); + + assert_eq!(list.get_previous_index(index_3), Some(index_1)); + assert_eq!(list.get_next_index(index_1), Some(index_3)); + assert_eq!(list.get_next_index(index_3), Some(index_2)); + } + + #[should_panic] + #[test] + fn test_vec_list_insert_after_panic_index_invalidated() { + let mut list = VecList::new(); + let index = list.push_front(0); + list.remove(index); + list.insert_after(index, 1); + } + + #[cfg(feature = "std")] + #[should_panic] + #[test] + fn test_vec_list_insert_after_panic_index_out_of_bounds() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + list.push_back(1); + let index_2 = list.push_back(2); + + list.remove(index_1); + list.pack_to_fit(); + list.insert_after(index_2, 3); + } + + #[test] + fn test_vec_list_insert_before() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.insert_before(index_1, 1); + + assert_eq!(list.front(), Some(&1)); + assert_eq!(list.get_previous_index(index_1), Some(index_2)); + assert_eq!(list.get_next_index(index_2), Some(index_1)); + + let index_3 = list.insert_before(index_1, 2); + + assert_eq!(list.get_previous_index(index_1), Some(index_3)); + assert_eq!(list.get_next_index(index_3), 
Some(index_1)); + assert_eq!(list.get_next_index(index_2), Some(index_3)); + } + + #[should_panic] + #[test] + fn test_vec_list_insert_before_panic_index_invalidated() { + let mut list = VecList::new(); + let index = list.push_front(0); + list.remove(index); + list.insert_before(index, 1); + } + + #[cfg(feature = "std")] + #[should_panic] + #[test] + fn test_vec_list_insert_before_panic_index_out_of_bounds() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + list.push_back(1); + let index_2 = list.push_back(2); + + list.remove(index_1); + list.pack_to_fit(); + list.insert_before(index_2, 3); + } + + #[test] + fn test_vec_list_into_iterator() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + assert_eq!(list.into_iter().collect::>(), [0, 1, -1, 2, -2]); + } + + #[test] + fn test_vec_list_is_empty() { + let mut list = VecList::new(); + assert!(list.is_empty()); + list.push_back(0); + assert!(!list.is_empty()); + } + + #[test] + fn test_vec_list_iter() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(2); + + let mut iter = list.iter(); + assert_eq!(iter.next(), Some(&0)); + assert_eq!(iter.next(), Some(&1)); + assert_eq!(iter.next(), Some(&2)); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_vec_list_iter_mut() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(2); + + let mut iter = list.iter_mut(); + let value = iter.next().unwrap(); + *value = 100; + + assert_eq!(iter.next(), Some(&mut 1)); + assert_eq!(iter.next(), Some(&mut 2)); + assert_eq!(iter.next(), None); + assert_eq!(list.front(), Some(&100)); + } + + #[test] + fn test_vec_list_len() { + let mut list = VecList::new(); + assert_eq!(list.len(), 0); + let index = list.push_back(0); + assert_eq!(list.len(), 1); + list.remove(index); + assert_eq!(list.len(), 0); + } + + #[test] + fn test_vec_list_new() { + let list: VecList = VecList::new(); + assert_eq!(list.capacity(), 0); + assert_eq!(list.len(), 0); + } + + #[test] + fn test_vec_list_ordering() { + let mut list_1 = VecList::new(); + list_1.push_back(0); + list_1.push_back(1); + list_1.push_back(-1); + list_1.push_back(2); + list_1.push_back(-2); + + let mut list_2 = list_1.clone(); + + list_2.push_back(5); + assert!(list_1 < list_2); + + list_2.pop_back(); + list_2.pop_back(); + assert!(list_1 > list_2); + + list_2.push_back(3); + assert!(list_1 < list_2); + + list_2.pop_back(); + list_2.push_back(-3); + assert!(list_1 > list_2); + } + + #[test] + fn test_vec_list_pop_back() { + let mut list = VecList::new(); + assert_eq!(list.pop_back(), None); + + list.push_back(0); + assert_eq!(list.pop_back(), Some(0)); + } + + #[test] + fn test_vec_list_pop_front() { + let mut list = VecList::new(); + assert_eq!(list.pop_front(), None); + + list.push_front(0); + assert_eq!(list.pop_front(), Some(0)); + } + + #[test] + fn test_vec_list_push_back() { + let mut list = VecList::new(); + list.push_back(0); + assert_eq!(list.back(), Some(&0)); + list.push_back(1); + assert_eq!(list.back(), Some(&1)); + list.push_back(2); + assert_eq!(list.back(), Some(&2)); + } + + #[test] + fn test_vec_list_push_back_capacity_increases() { + let mut list = VecList::with_capacity(1); + assert_eq!(list.capacity(), 1); + + let index = list.push_back(0); + assert_eq!(list.capacity(), 1); + + list.remove(index); + assert_eq!(list.capacity(), 1); + + list.push_back(0); + assert_eq!(list.capacity(), 1); + + 
list.push_back(1); + assert!(list.capacity() > 1); + } + + #[test] + fn test_vec_list_push_front() { + let mut list = VecList::new(); + list.push_front(0); + assert_eq!(list.front(), Some(&0)); + list.push_front(1); + assert_eq!(list.front(), Some(&1)); + list.push_front(2); + assert_eq!(list.front(), Some(&2)); + } + + #[test] + fn test_vec_list_remove() { + let mut list = VecList::new(); + let index = list.push_back(0); + assert_eq!(list.remove(index), Some(0)); + assert_eq!(list.remove(index), None); + } + + #[test] + fn test_vec_list_reserve() { + let mut list: VecList = VecList::new(); + assert_eq!(list.capacity(), 0); + + list.reserve(10); + let capacity = list.capacity(); + + assert!(capacity >= 10); + list.reserve(5); + + assert_eq!(list.capacity(), capacity); + } + + #[test] + fn test_vec_list_retain() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(-1); + list.push_back(2); + list.push_back(-2); + + list.retain(|&mut value| value >= 0); + assert_eq!(list.into_iter().collect::>(), [0, 1, 2]); + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_pack_to() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + assert!(list.capacity() >= 3); + + list.remove(index_1); + assert!(list.capacity() >= 3); + + let indices = list.indices(); + assert_eq!( + indices.map(|index| index.index.get()).collect::>(), + [1, 2] + ); + + let map = list.pack_to(5); + assert_eq!(list.capacity(), 5); + + let indices = list.indices(); + assert_eq!( + indices.map(|index| index.index.get()).collect::>(), + [0, 1] + ); + + assert_eq!(map.len(), 2); + assert_eq!(map.get(&index_2).unwrap().index.get(), 0); + assert_eq!(map.get(&index_3).unwrap().index.get(), 1); + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_pack_to_empty() { + let mut list: VecList = VecList::with_capacity(5); + list.pack_to(0); + assert_eq!(list.capacity(), 0); + } + + #[cfg(feature = "std")] + #[should_panic] + #[test] + fn test_vec_list_pack_to_panic() { + let mut list = VecList::new(); + list.push_back(0); + list.push_back(1); + list.push_back(2); + list.pack_to(2); + } + + #[cfg(feature = "std")] + #[test] + fn test_vec_list_pack_to_fit() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + assert!(list.capacity() >= 3); + + list.remove(index_1); + assert!(list.capacity() >= 3); + + let indices = list.indices(); + assert_eq!( + indices.map(|index| index.index.get()).collect::>(), + [1, 2] + ); + + let map = list.pack_to_fit(); + assert_eq!(list.capacity(), 2); + + let indices = list.indices(); + assert_eq!( + indices.map(|index| index.index.get()).collect::>(), + [0, 1] + ); + + assert_eq!(map.len(), 2); + assert_eq!(map.get(&index_2).unwrap().index.get(), 0); + assert_eq!(map.get(&index_3).unwrap().index.get(), 1); + } + + #[test] + fn test_vec_list_with_capacity() { + let list: VecList = VecList::with_capacity(10); + assert_eq!(list.capacity(), 10); + } + + #[test] + fn test_vec_list_clone_from() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + + let mut list2 = VecList::new(); + list2.clone_from(&list); + assert_eq!(list2.get(index_1), Some(&0)); + assert_eq!(list2.get(index_2), Some(&1)); + assert_eq!(list2.get(index_3), Some(&2)); + } + + #[test] + fn test_move_individual_elements() { + let mut list = 
VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + let index_3 = list.push_back(2); + let index_4 = list.push_back(3); + + // Move to tail + list.move_after(index_1, index_4); + assert_eq!(list.iter().copied().collect::>(), vec![1, 2, 3, 0]); + assert_eq!( + list.iter().rev().copied().collect::>(), + vec![0, 3, 2, 1] + ); + assert_eq!(list.back(), list.get(index_1)); + + // Move to head + list.move_before(index_1, index_2); + assert_eq!(list.iter().copied().collect::>(), vec![0, 1, 2, 3]); + assert_eq!( + list.iter().rev().copied().collect::>(), + vec![3, 2, 1, 0] + ); + + // Move non-tail/head node + list.move_before(index_3, index_2); + assert_eq!(list.iter().copied().collect::>(), vec![0, 2, 1, 3]); + assert_eq!( + list.iter().rev().copied().collect::>(), + vec![3, 1, 2, 0] + ); + } + + #[test] + fn test_move_back_index_front_index() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + list.push_back(1); + list.push_back(2); + list.push_back(3); + + // Move to tail + list.move_after(index_1, list.back_index().unwrap()); + assert_eq!(list.iter().copied().collect::>(), vec![1, 2, 3, 0]); + assert_eq!( + list.iter().rev().copied().collect::>(), + vec![0, 3, 2, 1] + ); + assert_eq!(list.back(), list.get(index_1)); + + // Move to head + list.move_before(index_1, list.front_index().unwrap()); + assert_eq!(list.iter().copied().collect::>(), vec![0, 1, 2, 3]); + assert_eq!( + list.iter().rev().copied().collect::>(), + vec![3, 2, 1, 0] + ); + } + + #[should_panic] + #[test] + fn test_move_after_panic1() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + list.remove(index_1); + list.move_after(index_1, index_2); + } + + #[should_panic] + #[test] + fn test_move_after_panic2() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + list.remove(index_1); + list.move_after(index_2, index_1); + } + + #[should_panic] + #[test] + fn test_move_after_panic3() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + list.move_after(index_1, index_1); + } + + #[should_panic] + #[test] + fn test_move_before_panic1() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + list.remove(index_1); + list.move_before(index_1, index_2); + } + + #[should_panic] + #[test] + fn test_move_before_panic2() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + let index_2 = list.push_back(1); + list.remove(index_1); + list.move_before(index_2, index_1); + } + + #[should_panic] + #[test] + fn test_move_before_panic3() { + let mut list = VecList::new(); + let index_1 = list.push_back(0); + list.move_before(index_1, index_1); + } +} diff --git a/vendor/dlv-list/src/serde.rs b/vendor/dlv-list/src/serde.rs new file mode 100644 index 0000000..4ebbdaf --- /dev/null +++ b/vendor/dlv-list/src/serde.rs @@ -0,0 +1,113 @@ +use core::{ + fmt::{self, Formatter}, + marker::PhantomData, +}; + +use serde::{ + de::{ + value::SeqDeserializer, Deserialize, Deserializer, Error, IntoDeserializer, SeqAccess, Visitor, + }, + ser::{Serialize, SerializeSeq, Serializer}, +}; + +use crate::VecList; + +impl Serialize for VecList { + fn serialize(&self, serializer: U) -> Result { + let mut seq = serializer.serialize_seq(Some(self.len()))?; + + for value in self.iter() { + seq.serialize_element(value)?; + } + + seq.end() + } +} + +struct VecListVisitor(PhantomData); + +impl<'de, T: Deserialize<'de>> 
Visitor<'de> for VecListVisitor<T> {
+  type Value = VecList<T>;
+
+  fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    write!(formatter, "a sequence")
+  }
+
+  fn visit_seq<A>(self, mut access: A) -> Result<Self::Value, A::Error>
+  where
+    A: SeqAccess<'de>,
+  {
+    let mut list = VecList::with_capacity(access.size_hint().unwrap_or_default());
+
+    while let Some(value) = access.next_element()? {
+      let _ = list.push_back(value);
+    }
+
+    Ok(list)
+  }
+}
+
+impl<'de, T: Deserialize<'de>> Deserialize<'de> for VecList<T> {
+  fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+    deserializer.deserialize_seq(VecListVisitor(PhantomData))
+  }
+}
+
+impl<'de, T, E> IntoDeserializer<'de, E> for VecList<T>
+where
+  T: IntoDeserializer<'de, E>,
+  E: Error,
+{
+  type Deserializer = SeqDeserializer<<Self as IntoIterator>::IntoIter, E>;
+
+  fn into_deserializer(self) -> Self::Deserializer {
+    SeqDeserializer::new(self.into_iter())
+  }
+}
+
+#[allow(unused_results)]
+#[cfg(test)]
+mod test {
+  use coverage_helper::test;
+  use serde_test::{assert_de_tokens_error, assert_tokens, Token};
+
+  use super::*;
+
+  #[test]
+  fn test_de_error() {
+    assert_de_tokens_error::<VecList<i32>>(
+      &[Token::Map { len: Some(0) }],
+      "invalid type: map, expected a sequence",
+    );
+  }
+
+  #[test]
+  fn test_ser_de_empty() {
+    let list = VecList::<i32>::new();
+
+    assert_tokens(&list, &[Token::Seq { len: Some(0) }, Token::SeqEnd]);
+  }
+
+  #[test]
+  fn test_ser_de() {
+    let mut list = VecList::new();
+    list.push_back(0);
+    list.push_back(1);
+    list.push_back(2);
+    list.push_back(3);
+    list.push_back(4);
+
+    assert_tokens(
+      &list,
+      &[
+        Token::Seq { len: Some(5) },
+        Token::I32(0),
+        Token::I32(1),
+        Token::I32(2),
+        Token::I32(3),
+        Token::I32(4),
+        Token::SeqEnd,
+      ],
+    );
+  }
+}
diff --git a/vendor/getrandom/.cargo-checksum.json b/vendor/getrandom/.cargo-checksum.json
new file mode 100644
index 0000000..fc46f72
--- /dev/null
+++ b/vendor/getrandom/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"fe1a0dc50ac5c7bdd60591f6d1027072c68dcf142131945f782169c74b9e8188","Cargo.toml":"5506345251dee6e156a3d0072d2b3b6bc6894d8cf91adb85fefe211741e7c7f9","LICENSE-APACHE":"aaff376532ea30a0cd5330b9502ad4a4c8bf769c539c87ffe78819d188a18ebf","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"e5858de17fc28ec7a3a62cbefedd301ace8a85291d0aad5cb416824d1b5abadd","SECURITY.md":"816ea79f8c7937888ab5a972a1efb270c4bada028b448953a195359fe11d526e","benches/buffer.rs":"242f56eaeecd1d0a0f6f9419d1bf312b8d211215355022bd4aef5e5e0f53e2a5","src/3ds.rs":"e41b653723740ed89fa68f495b64125786e8dec002e3556d164c5795db62ea50","src/apple-other.rs":"3ff0abc72786a2ac063cdc5df4d18cc53dc493cd68fcb33734cf40cfdbb8f644","src/bsd_arandom.rs":"cfa0627a6b4d1f37065d415972ab813bf1c9f43979d2ff9440c92a53868123aa","src/custom.rs":"a256bd6e7e9bb560803f23a36bd437859ea8a9d8ec92608930b94b33e7314c64","src/dragonfly.rs":"047008e742a7a8050e61ed9626b9f4146dfaa0675e11d6f3680eb8af498d9a6d","src/emscripten.rs":"e0b3b44b52f54454ec3e0a9e7c5222003369d9d1575cc0652e3e7cbe1b3b6da7","src/error.rs":"ff09a7e02d7aff3e45eca6bbef6c686cc46f3c2371a0897a856e4dec4b942e46","src/error_impls.rs":"9c34832ebb99cd5e31bc5c8ffc5beb5b3fa6f7ff0226aaa1cdf8e10e6d64b324","src/espidf.rs":"915ca14cbf9299de51a3c67f34fdd252461d6545f33a7232dfb7fa247ccc0209","src/fuchsia.rs":"d307b15db9f2d67b43050ae6027779a6eb2b8a69e1e89931b55b767aa2622250","src/hermit.rs":"18fdd7917c73f8b16aa82b18003948d32f9b314da10e16ef9cd2fa077b17af00","src/hurd.rs":"1053908c4eaeae9e44078c9509aa80268caa1d66642b7c6a9a80f5b9f0e63fb0","src/js.rs":"c4cd60bcfe63f8affe947773197e288536ab205a73001059f39fc2e5688e98b6","src/lib.rs":"178b4b1dae3a41721f365ea5a4eda3f5b936b310afa4431935968e96edac3120","src/linux_android.rs":"e5f9e579bbde254fcab8f6b79b893d6b74054e023b21c56a3b2b21d8f4b4d825","src/macos.rs":"8f51e095906e751b68e837bfc63cc02b243e1698b66353566ccba507c81ddad3","src/openbsd.rs":"f6fd0aa74f704335a7e0532bf5e61a7ca90b0cbc398a9c01a0fd891b6fabca0c","src/rdrand.rs":"846ac7b8380a05a50e0592dca57338beb1634c0efc878d6d1e9421be3469a744","src/solaris_illumos.rs":"7209c8b1172fc4df5ad8a79f165556b403cdd90b9eb5f7f7f9ec97bf06f4d8d7","src/solid.rs":"58919109faf06e6d546f75f785d78d6c055e1f95110d1791d9191d1e404f1e20","src/use_file.rs":"ecfc1011b4a9c962ae9b4b75ca5149a4ee83cb0951a80224ce5417046ce11717","src/util.rs":"580fb7c4e41eb6007def8626e019829c22a63980fa4da68a1adef687c57953a2","src/util_libc.rs":"48c1fe251958c6c57b7c93d83f3648d97034feeee0d5cda0cbe7bc0ee0a73fca","src/vita.rs":"ecfa9d347ad5c480ba8ff80a9de968ae060ffb435f1e95777ee413642e62e50a","src/vxworks.rs":"984726b6dd9638a38ceda83124683419b9d69a9041ad9117a470eaec5b386ce4","src/wasi.rs":"229a58af3f13a629571fb83a0c11ef0ed696ba7a44ee2e811c9f348a19b2fb69","src/windows.rs":"dd3d833979fb6b96c04b84dbf8461d5fc819bde93ad9dc26bd0f6c282656c733","tests/common/mod.rs":"b9a36043d71963ba43a9e2899ba8eea80ff9f3284d243d9b9b9f941afa4f4aa4","tests/custom.rs":"1e944ae523b62dba53fe3daf1b964a2498c8fdd21dfa7afe53781bff2fcf276e","tests/normal.rs":"9e1c4b1e468a09ed0225370dfb6608f8b8135e0fabb09bbc1a718105164aade6","tests/rdrand.rs":"156676b57f1e6bd4d66d85b8a999f1cf7a8fb749a10b8b2b4dbbcf803e8c4cd3"},"package":"fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"} \ No newline at end of file diff --git a/vendor/getrandom/CHANGELOG.md b/vendor/getrandom/CHANGELOG.md new file mode 100644 index 0000000..7b1f46a --- /dev/null +++ b/vendor/getrandom/CHANGELOG.md @@ -0,0 +1,435 @@ +# Changelog +All notable changes to this project will be 
documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [0.2.11] - 2023-11-08 +### Added +- GNU/Hurd support [#370] + +### Changed +- Renamed `__getrandom_internal` to `__GETRANDOM_INTERNAL` [#369] +- Updated link to Hermit docs [#374] + +[#369]: https://github.com/rust-random/getrandom/pull/369 +[#370]: https://github.com/rust-random/getrandom/pull/370 +[#374]: https://github.com/rust-random/getrandom/pull/374 + +## [0.2.10] - 2023-06-06 +### Added +- Support for PS Vita (`armv7-sony-vita-newlibeabihf`) [#359] + +### Changed +- Use getentropy from libc on Emscripten targets [#362] + +[#359]: https://github.com/rust-random/getrandom/pull/359 +[#362]: https://github.com/rust-random/getrandom/pull/362 + +## [0.2.9] - 2023-04-06 +### Added +- AIX support [#282] +- `getrandom_uninit` function [#291] +- `wasm64-unknown-unknown` support [#303] +- tvOS and watchOS support [#317] +- QNX/nto support [#325] +- Support for `getrandom` syscall on NetBSD ≥ 10.0 [#331] +- `RtlGenRandom` fallback for non-UWP Windows [#337] + +### Breaking Changes +- Update MSRV to 1.36 [#291] + +### Fixed +- Solaris/OpenBSD/Dragonfly build [#301] + +### Changed +- Update MSRV to 1.36 [#291] +- Use getentropy on Emscripten [#307] +- Solaris: consistantly use `/dev/random` source [#310] +- Move 3ds selection above rdrand/js/custom fallback [#312] +- Remove buffer zeroing from Node.js implementation [#315] +- Use `open` instead of `open64` [#326] +- Remove #cfg from bsd_arandom.rs [#332] +- Hermit: use `sys_read_entropy` syscall [#333] +- Eliminate potential panic in sys_fill_exact [#334] +- rdrand: Remove checking for 0 and !0 and instead check CPU family and do a self-test [#335] +- Move `__getrandom_custom` definition into a const block [#344] +- Switch the custom backend to Rust ABI [#347] + +[#282]: https://github.com/rust-random/getrandom/pull/282 +[#291]: https://github.com/rust-random/getrandom/pull/291 +[#301]: https://github.com/rust-random/getrandom/pull/301 +[#303]: https://github.com/rust-random/getrandom/pull/303 +[#307]: https://github.com/rust-random/getrandom/pull/307 +[#310]: https://github.com/rust-random/getrandom/pull/310 +[#312]: https://github.com/rust-random/getrandom/pull/312 +[#315]: https://github.com/rust-random/getrandom/pull/315 +[#317]: https://github.com/rust-random/getrandom/pull/317 +[#325]: https://github.com/rust-random/getrandom/pull/325 +[#326]: https://github.com/rust-random/getrandom/pull/326 +[#331]: https://github.com/rust-random/getrandom/pull/331 +[#332]: https://github.com/rust-random/getrandom/pull/332 +[#333]: https://github.com/rust-random/getrandom/pull/333 +[#334]: https://github.com/rust-random/getrandom/pull/334 +[#335]: https://github.com/rust-random/getrandom/pull/335 +[#337]: https://github.com/rust-random/getrandom/pull/337 +[#344]: https://github.com/rust-random/getrandom/pull/344 +[#347]: https://github.com/rust-random/getrandom/pull/347 + +## [0.2.8] - 2022-10-20 +### Changed +- The [Web Cryptography API] will now be preferred on `wasm32-unknown-unknown` + when using the `"js"` feature, even on Node.js [#284] [#295] + +### Added +- Added benchmarks to track buffer initialization cost [#272] + +### Fixed +- Use `$crate` in `register_custom_getrandom!` [#270] + +### Documentation +- Add information about enabling `"js"` feature [#280] +- Fix link to `wasm-bindgen` [#278] +- Document the varied implementations for underlying 
randomness sources [#276] + +[Web Cryptography API]: https://developer.mozilla.org/en-US/docs/Web/API/Web_Crypto_API +[#284]: https://github.com/rust-random/getrandom/pull/284 +[#295]: https://github.com/rust-random/getrandom/pull/295 +[#272]: https://github.com/rust-random/getrandom/pull/272 +[#270]: https://github.com/rust-random/getrandom/pull/270 +[#280]: https://github.com/rust-random/getrandom/pull/280 +[#278]: https://github.com/rust-random/getrandom/pull/278 +[#276]: https://github.com/rust-random/getrandom/pull/276 + +## [0.2.7] - 2022-06-14 +### Changed +- Update `wasi` dependency to `0.11` [#253] + +### Fixed +- Use `AtomicPtr` instead of `AtomicUsize` for Strict Provenance compatibility. [#263] + +### Documentation +- Add comments explaining use of fallback mechanisms [#257] [#260] + +[#263]: https://github.com/rust-random/getrandom/pull/263 +[#260]: https://github.com/rust-random/getrandom/pull/260 +[#253]: https://github.com/rust-random/getrandom/pull/253 +[#257]: https://github.com/rust-random/getrandom/pull/257 + +## [0.2.6] - 2022-03-28 +### Added +- Nintendo 3DS (`armv6k-nintendo-3ds`) support [#248] + +### Changed +- Retry `open` when interrupted [#252] + +[#248]: https://github.com/rust-random/getrandom/pull/248 +[#252]: https://github.com/rust-random/getrandom/pull/252 + +## [0.2.5] - 2022-02-22 +### Added +- ESP-IDF targets (`*‑espidf`) support [#245] + +### Fixed +- Webpack warning caused by dynamic require [#234] +- Error checking on iOS for `SecRandomCopyBytes` [#244] + +[#234]: https://github.com/rust-random/getrandom/pull/234 +[#244]: https://github.com/rust-random/getrandom/pull/244 +[#245]: https://github.com/rust-random/getrandom/pull/245 + +## [0.2.4] - 2021-12-13 +### Changed +- Use explicit imports in the `js` backend [#220] +- Use `/dev/urandom` on Redox instead of `rand:` [#222] +- Use `NonZeroU32::new_unchecked` to convert wasi error [#233] + +### Added +- SOLID targets (`*-kmc-solid_*`) support [#235] +- Limited Hermit (`x86_64-unknown-hermit`) support [#236] + +[#220]: https://github.com/rust-random/getrandom/pull/220 +[#222]: https://github.com/rust-random/getrandom/pull/222 +[#233]: https://github.com/rust-random/getrandom/pull/233 +[#235]: https://github.com/rust-random/getrandom/pull/235 +[#236]: https://github.com/rust-random/getrandom/pull/236 + +## [0.2.3] - 2021-04-10 +### Changed +- Replace build.rs with link attributes. [#205] +- Add support for getrandom syscall on DragonFly BSD. [#210] +- Improve Node.js detection. [#215] + +[#205]: https://github.com/rust-random/getrandom/pull/205 +[#210]: https://github.com/rust-random/getrandom/pull/210 +[#215]: https://github.com/rust-random/getrandom/pull/215 + +## [0.2.2] - 2021-01-19 +### Changed +- Forward `rustc-dep-of-std` to dependencies. [#198] +- Highlight feature-dependent functionality in documentation using the `doc_cfg` feature. [#200] + +[#198]: https://github.com/rust-random/getrandom/pull/198 +[#200]: https://github.com/rust-random/getrandom/pull/200 + +## [0.2.1] - 2021-01-03 +### Changed +- Update `cfg-if` to v1.0. [#166] +- Update `wasi` to v0.10. [#167] + +### Fixed +- Multithreaded WASM support. [#165] + +### Removed +- Windows XP support. [#177] +- Direct `stdweb` support. [#178] +- CloudABI support. 
[#184] + +[#165]: https://github.com/rust-random/getrandom/pull/165 +[#166]: https://github.com/rust-random/getrandom/pull/166 +[#167]: https://github.com/rust-random/getrandom/pull/167 +[#177]: https://github.com/rust-random/getrandom/pull/177 +[#178]: https://github.com/rust-random/getrandom/pull/178 +[#184]: https://github.com/rust-random/getrandom/pull/184 + +## [0.2.0] - 2020-09-10 +### Features for using getrandom on unsupported targets + +The following (off by default) Cargo features have been added: +- `"rdrand"` - use the RDRAND instruction on `no_std` `x86`/`x86_64` targets [#133] +- `"js"` - use JavaScript calls on `wasm32-unknown-unknown` [#149] + - Replaces the `stdweb` and `wasm-bindgen` features (which are removed) +- `"custom"` - allows a user to specify a custom implementation [#109] + +### Breaking Changes +- Unsupported targets no longer compile [#107] +- Change/Add `Error` constants [#120] +- Only impl `std` traits when the `"std"` Cargo feature is specified [#106] +- Remove official support for Hermit, L4Re, and UEFI [#133] +- Remove optional `"log"` dependency [#131] +- Update minimum supported Linux kernel to 2.6.32 [#153] +- Update MSRV to 1.34 [#159] + +[#106]: https://github.com/rust-random/getrandom/pull/106 +[#107]: https://github.com/rust-random/getrandom/pull/107 +[#109]: https://github.com/rust-random/getrandom/pull/109 +[#120]: https://github.com/rust-random/getrandom/pull/120 +[#131]: https://github.com/rust-random/getrandom/pull/131 +[#133]: https://github.com/rust-random/getrandom/pull/133 +[#149]: https://github.com/rust-random/getrandom/pull/149 +[#153]: https://github.com/rust-random/getrandom/pull/153 +[#159]: https://github.com/rust-random/getrandom/pull/159 + +## [0.1.16] - 2020-12-31 +### Changed +- Update `cfg-if` to v1.0. [#173] +- Implement `std::error::Error` for the `Error` type on additional targets. [#169] + +### Fixed +- Multithreaded WASM support. [#171] + +[#173]: https://github.com/rust-random/getrandom/pull/173 +[#171]: https://github.com/rust-random/getrandom/pull/171 +[#169]: https://github.com/rust-random/getrandom/pull/169 + +## [0.1.15] - 2020-09-10 +### Changed +- Added support for Internet Explorer 11 [#139] +- Fix Webpack require warning with `wasm-bindgen` [#137] + +[#137]: https://github.com/rust-random/getrandom/pull/137 +[#139]: https://github.com/rust-random/getrandom/pull/139 + +## [0.1.14] - 2020-01-07 +### Changed +- Remove use of spin-locks in the `use_file` module. [#125] +- Update `wasi` to v0.9. [#126] +- Do not read errno value on DragonFlyBSD to fix compilation failure. [#129] + +[#125]: https://github.com/rust-random/getrandom/pull/125 +[#126]: https://github.com/rust-random/getrandom/pull/126 +[#129]: https://github.com/rust-random/getrandom/pull/129 + +## [0.1.13] - 2019-08-25 +### Added +- VxWorks targets support. [#86] + +### Changed +- If zero-length slice is passed to the `getrandom` function, always return +`Ok(())` immediately without doing any calls to the underlying operating +system. [#104] +- Use the `kern.arandom` sysctl on NetBSD. [#115] + +### Fixed +- Bump `cfg-if` minimum version from 0.1.0 to 0.1.2. [#112] +- Typos and bad doc links. 
[#117] + +[#86]: https://github.com/rust-random/getrandom/pull/86 +[#104]: https://github.com/rust-random/getrandom/pull/104 +[#112]: https://github.com/rust-random/getrandom/pull/112 +[#115]: https://github.com/rust-random/getrandom/pull/115 +[#117]: https://github.com/rust-random/getrandom/pull/117 + +## [0.1.12] - 2019-08-18 +### Changed +- Update wasi dependency from v0.5 to v0.7. [#100] + +[#100]: https://github.com/rust-random/getrandom/pull/100 + +## [0.1.11] - 2019-08-25 +### Fixed +- Implement `std`-dependent traits for selected targets even if `std` +feature is disabled. (backward compatibility with v0.1.8) [#96] + +[#96]: https://github.com/rust-random/getrandom/pull/96 + +## [0.1.10] - 2019-08-18 [YANKED] +### Changed +- Use the dummy implementation on `wasm32-unknown-unknown` even with the +disabled `dummy` feature. [#90] + +### Fixed +- Fix CSP error for `wasm-bindgen`. [#92] + +[#90]: https://github.com/rust-random/getrandom/pull/90 +[#92]: https://github.com/rust-random/getrandom/pull/92 + +## [0.1.9] - 2019-08-14 [YANKED] +### Changed +- Remove `std` dependency for opening and reading files. [#58] +- Use `wasi` instead of `libc` on WASI target. [#64] +- By default emit a compile-time error when built for an unsupported target. +This behaviour can be disabled by using the `dummy` feature. [#71] + +### Added +- Add support for UWP targets. [#69] +- Add unstable `rustc-dep-of-std` feature. [#78] + +[#58]: https://github.com/rust-random/getrandom/pull/58 +[#64]: https://github.com/rust-random/getrandom/pull/64 +[#69]: https://github.com/rust-random/getrandom/pull/69 +[#71]: https://github.com/rust-random/getrandom/pull/71 +[#78]: https://github.com/rust-random/getrandom/pull/78 + +## [0.1.8] - 2019-07-29 +### Changed +- Explicitly specify types to arguments of 'libc::syscall'. [#74] + +[#74]: https://github.com/rust-random/getrandom/pull/74 + +## [0.1.7] - 2019-07-29 +### Added +- Support for hermit and l4re. [#61] +- `Error::raw_os_error` method, `Error::INTERNAL_START` and +`Error::CUSTOM_START` constants. Use `libc` for retrieving OS error descriptions. [#54] + +### Changed +- Remove `lazy_static` dependency and use custom structures for lock-free +initialization. [#51] [#52] +- Try `getrandom()` first on FreeBSD. [#57] + +### Removed +- Bitrig support. [#56] + +### Deprecated +- `Error::UNKNOWN`, `Error::UNAVAILABLE`. [#54] + +[#51]: https://github.com/rust-random/getrandom/pull/51 +[#52]: https://github.com/rust-random/getrandom/pull/52 +[#54]: https://github.com/rust-random/getrandom/pull/54 +[#56]: https://github.com/rust-random/getrandom/pull/56 +[#57]: https://github.com/rust-random/getrandom/pull/57 +[#61]: https://github.com/rust-random/getrandom/pull/61 + +## [0.1.6] - 2019-06-30 +### Changed +- Minor change of RDRAND AMD bug handling. [#48] + +[#48]: https://github.com/rust-random/getrandom/pull/48 + +## [0.1.5] - 2019-06-29 +### Fixed +- Use shared `File` instead of shared file descriptor. [#44] +- Workaround for RDRAND hardware bug present on some AMD CPUs. [#43] + +### Changed +- Try `getentropy` and then fallback to `/dev/random` on macOS. [#38] + +[#38]: https://github.com/rust-random/getrandom/issues/38 +[#43]: https://github.com/rust-random/getrandom/pull/43 +[#44]: https://github.com/rust-random/getrandom/issues/44 + +## [0.1.4] - 2019-06-28 +### Added +- Add support for `x86_64-unknown-uefi` target by using RDRAND with CPUID +feature detection. [#30] + +### Fixed +- Fix long buffer issues on Windows and Linux. 
[#31] [#32] +- Check `EPERM` in addition to `ENOSYS` on Linux. [#37] + +### Changed +- Improve efficiency by sharing file descriptor across threads. [#13] +- Remove `cloudabi`, `winapi`, and `fuchsia-cprng` dependencies. [#40] +- Improve RDRAND implementation. [#24] +- Don't block during syscall detection on Linux. [#26] +- Increase consistency with libc implementation on FreeBSD. [#36] +- Apply `rustfmt`. [#39] + +[#30]: https://github.com/rust-random/getrandom/pull/30 +[#13]: https://github.com/rust-random/getrandom/issues/13 +[#40]: https://github.com/rust-random/getrandom/pull/40 +[#26]: https://github.com/rust-random/getrandom/pull/26 +[#24]: https://github.com/rust-random/getrandom/pull/24 +[#39]: https://github.com/rust-random/getrandom/pull/39 +[#36]: https://github.com/rust-random/getrandom/pull/36 +[#31]: https://github.com/rust-random/getrandom/issues/31 +[#32]: https://github.com/rust-random/getrandom/issues/32 +[#37]: https://github.com/rust-random/getrandom/issues/37 + +## [0.1.3] - 2019-05-15 +- Update for `wasm32-unknown-wasi` being renamed to `wasm32-wasi`, and for + WASI being categorized as an OS. + +## [0.1.2] - 2019-04-06 +- Add support for `wasm32-unknown-wasi` target. + +## [0.1.1] - 2019-04-05 +- Enable std functionality for CloudABI by default. + +## [0.1.0] - 2019-03-23 +Publish initial implementation. + +## [0.0.0] - 2019-01-19 +Publish an empty template library. + +[0.2.11]: https://github.com/rust-random/getrandom/compare/v0.2.10...v0.2.11 +[0.2.10]: https://github.com/rust-random/getrandom/compare/v0.2.9...v0.2.10 +[0.2.9]: https://github.com/rust-random/getrandom/compare/v0.2.8...v0.2.9 +[0.2.8]: https://github.com/rust-random/getrandom/compare/v0.2.7...v0.2.8 +[0.2.7]: https://github.com/rust-random/getrandom/compare/v0.2.6...v0.2.7 +[0.2.6]: https://github.com/rust-random/getrandom/compare/v0.2.5...v0.2.6 +[0.2.5]: https://github.com/rust-random/getrandom/compare/v0.2.4...v0.2.5 +[0.2.4]: https://github.com/rust-random/getrandom/compare/v0.2.3...v0.2.4 +[0.2.3]: https://github.com/rust-random/getrandom/compare/v0.2.2...v0.2.3 +[0.2.2]: https://github.com/rust-random/getrandom/compare/v0.2.1...v0.2.2 +[0.2.1]: https://github.com/rust-random/getrandom/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/rust-random/getrandom/compare/v0.1.15...v0.2.0 +[0.1.16]: https://github.com/rust-random/getrandom/compare/v0.1.15...v0.1.16 +[0.1.15]: https://github.com/rust-random/getrandom/compare/v0.1.14...v0.1.15 +[0.1.14]: https://github.com/rust-random/getrandom/compare/v0.1.13...v0.1.14 +[0.1.13]: https://github.com/rust-random/getrandom/compare/v0.1.12...v0.1.13 +[0.1.12]: https://github.com/rust-random/getrandom/compare/v0.1.11...v0.1.12 +[0.1.11]: https://github.com/rust-random/getrandom/compare/v0.1.10...v0.1.11 +[0.1.10]: https://github.com/rust-random/getrandom/compare/v0.1.9...v0.1.10 +[0.1.9]: https://github.com/rust-random/getrandom/compare/v0.1.8...v0.1.9 +[0.1.8]: https://github.com/rust-random/getrandom/compare/v0.1.7...v0.1.8 +[0.1.7]: https://github.com/rust-random/getrandom/compare/v0.1.6...v0.1.7 +[0.1.6]: https://github.com/rust-random/getrandom/compare/v0.1.5...v0.1.6 +[0.1.5]: https://github.com/rust-random/getrandom/compare/v0.1.4...v0.1.5 +[0.1.4]: https://github.com/rust-random/getrandom/compare/v0.1.3...v0.1.4 +[0.1.3]: https://github.com/rust-random/getrandom/compare/v0.1.2...v0.1.3 +[0.1.2]: https://github.com/rust-random/getrandom/compare/v0.1.1...v0.1.2 +[0.1.1]: https://github.com/rust-random/getrandom/compare/v0.1.0...v0.1.1 +[0.1.0]: 
https://github.com/rust-random/getrandom/compare/v0.0.0...v0.1.0 +[0.0.0]: https://github.com/rust-random/getrandom/releases/tag/v0.0.0 diff --git a/vendor/getrandom/Cargo.toml b/vendor/getrandom/Cargo.toml new file mode 100644 index 0000000..a4c3946 --- /dev/null +++ b/vendor/getrandom/Cargo.toml @@ -0,0 +1,94 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "getrandom" +version = "0.2.11" +authors = ["The Rand Project Developers"] +exclude = [".*"] +description = "A small cross-platform library for retrieving random data from system source" +documentation = "https://docs.rs/getrandom" +readme = "README.md" +categories = [ + "os", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-random/getrandom" + +[package.metadata.cross.target.x86_64-unknown-netbsd] +pre-build = [ + "mkdir -p /tmp/netbsd", + "curl https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.2/amd64/binary/sets/base.tar.xz -O", + "tar -C /tmp/netbsd -xJf base.tar.xz", + "cp /tmp/netbsd/usr/lib/libexecinfo.so /usr/local/x86_64-unknown-netbsd/lib", + "rm base.tar.xz", + "rm -rf /tmp/netbsd", +] + +[package.metadata.docs.rs] +features = [ + "std", + "custom", +] +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies.cfg-if] +version = "1" + +[dependencies.compiler_builtins] +version = "0.1" +optional = true + +[dependencies.core] +version = "1.0" +optional = true +package = "rustc-std-workspace-core" + +[features] +custom = [] +js = [ + "wasm-bindgen", + "js-sys", +] +rdrand = [] +rustc-dep-of-std = [ + "compiler_builtins", + "core", + "libc/rustc-dep-of-std", + "wasi/rustc-dep-of-std", +] +std = [] +test-in-browser = [] + +[target."cfg(all(any(target_arch = \"wasm32\", target_arch = \"wasm64\"), target_os = \"unknown\"))".dependencies.js-sys] +version = "0.3" +optional = true + +[target."cfg(all(any(target_arch = \"wasm32\", target_arch = \"wasm64\"), target_os = \"unknown\"))".dependencies.wasm-bindgen] +version = "0.2.62" +optional = true +default-features = false + +[target."cfg(all(any(target_arch = \"wasm32\", target_arch = \"wasm64\"), target_os = \"unknown\"))".dev-dependencies.wasm-bindgen-test] +version = "0.3.18" + +[target."cfg(target_os = \"wasi\")".dependencies.wasi] +version = "0.11" +default-features = false + +[target."cfg(unix)".dependencies.libc] +version = "0.2.149" +default-features = false diff --git a/vendor/getrandom/LICENSE-APACHE b/vendor/getrandom/LICENSE-APACHE new file mode 100644 index 0000000..17d7468 --- /dev/null +++ b/vendor/getrandom/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/getrandom/LICENSE-MIT b/vendor/getrandom/LICENSE-MIT
new file mode 100644
index 0000000..d93b5ba
--- /dev/null
+++ b/vendor/getrandom/LICENSE-MIT
@@ -0,0 +1,26 @@
+Copyright 2018 Developers of the Rand project
+Copyright (c) 2014 The Rust Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/getrandom/README.md b/vendor/getrandom/README.md
new file mode 100644
index 0000000..c43ad42
--- /dev/null
+++ b/vendor/getrandom/README.md
@@ -0,0 +1,64 @@
+# getrandom
+
+[![Build Status]][GitHub Actions] [![Crate]][crates.io] [![Documentation]][docs.rs] [![Dependency Status]][deps.rs] [![Downloads]][crates.io] [![License]][LICENSE-MIT]
+
+[GitHub Actions]: https://github.com/rust-random/getrandom/actions?query=workflow:Tests+branch:master
+[Build Status]: https://github.com/rust-random/getrandom/actions/workflows/tests.yml/badge.svg?branch=master
+[crates.io]: https://crates.io/crates/getrandom
+[Crate]: https://img.shields.io/crates/v/getrandom
+[docs.rs]: https://docs.rs/getrandom
+[Documentation]: https://docs.rs/getrandom/badge.svg
+[deps.rs]: https://deps.rs/repo/github/rust-random/getrandom
+[Dependency Status]: https://deps.rs/repo/github/rust-random/getrandom/status.svg
+[Downloads]: https://img.shields.io/crates/d/getrandom
+[LICENSE-MIT]: https://raw.githubusercontent.com/rust-random/getrandom/master/LICENSE-MIT
+[License]: https://img.shields.io/crates/l/getrandom
+
+
+A Rust library for retrieving random data from (operating) system sources. It is
+assumed that the system always provides high-quality, cryptographically secure random
+data, ideally backed by hardware entropy sources. This crate derives its name
+from Linux's `getrandom` function, but is cross-platform, roughly supporting
+the same set of platforms as Rust's `std` lib.
+
+This is a low-level API. Most users should prefer using a high-level
+random-number library like [`rand`].
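+
+As a rough sketch of that higher-level route (assuming the separate [`rand`]
+crate, version 0.8, which is not part of this repository):
+
+```rust
+use rand::RngCore; // provides fill_bytes()
+
+fn get_random_buf_with_rand() -> [u8; 32] {
+    let mut buf = [0u8; 32];
+    // thread_rng() is a userspace CSPRNG that is itself seeded via getrandom.
+    rand::thread_rng().fill_bytes(&mut buf);
+    buf
+}
+```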
+ +[`rand`]: https://crates.io/crates/rand + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +getrandom = "0.2" +``` + +Then invoke the `getrandom` function: + +```rust +fn get_random_buf() -> Result<[u8; 32], getrandom::Error> { + let mut buf = [0u8; 32]; + getrandom::getrandom(&mut buf)?; + Ok(buf) +} +``` + +For more information about supported targets, entropy sources, `no_std` targets, +crate features, WASM support and Custom RNGs see the +[`getrandom` documentation](https://docs.rs/getrandom/latest) and +[`getrandom::Error` documentation](https://docs.rs/getrandom/latest/getrandom/struct.Error.html). + +## Minimum Supported Rust Version + +This crate requires Rust 1.36.0 or later. + +# License + +The `getrandom` library is distributed under either of + + * [Apache License, Version 2.0](LICENSE-APACHE) + * [MIT license](LICENSE-MIT) + +at your option. diff --git a/vendor/getrandom/SECURITY.md b/vendor/getrandom/SECURITY.md new file mode 100644 index 0000000..19bfb9a --- /dev/null +++ b/vendor/getrandom/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/rust-random/getrandom/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/getrandom/benches/buffer.rs b/vendor/getrandom/benches/buffer.rs new file mode 100644 index 0000000..b32be43 --- /dev/null +++ b/vendor/getrandom/benches/buffer.rs @@ -0,0 +1,71 @@ +#![feature(test, maybe_uninit_uninit_array_transpose)] +extern crate test; + +use std::mem::MaybeUninit; + +// Call getrandom on a zero-initialized stack buffer +#[inline(always)] +fn bench_getrandom() { + let mut buf = [0u8; N]; + getrandom::getrandom(&mut buf).unwrap(); + test::black_box(&buf as &[u8]); +} + +// Call getrandom_uninit on an uninitialized stack buffer +#[inline(always)] +fn bench_getrandom_uninit() { + let mut uninit = [MaybeUninit::uninit(); N]; + let buf: &[u8] = getrandom::getrandom_uninit(&mut uninit).unwrap(); + test::black_box(buf); +} + +// We benchmark using #[inline(never)] "inner" functions for two reasons: +// - Avoiding inlining reduces a source of variance when running benchmarks. +// - It is _much_ easier to get the assembly or IR for the inner loop. +// +// For example, using cargo-show-asm (https://github.com/pacak/cargo-show-asm), +// we can get the assembly for a particular benchmark's inner loop by running: +// cargo asm --bench buffer --release buffer::p384::bench_getrandom::inner +macro_rules! 
bench {
+    ( $name:ident, $size:expr ) => {
+        pub mod $name {
+            #[bench]
+            pub fn bench_getrandom(b: &mut test::Bencher) {
+                #[inline(never)]
+                fn inner() {
+                    super::bench_getrandom::<{ $size }>()
+                }
+
+                b.bytes = $size as u64;
+                b.iter(inner);
+            }
+            #[bench]
+            pub fn bench_getrandom_uninit(b: &mut test::Bencher) {
+                #[inline(never)]
+                fn inner() {
+                    super::bench_getrandom_uninit::<{ $size }>()
+                }
+
+                b.bytes = $size as u64;
+                b.iter(inner);
+            }
+        }
+    };
+}
+
+// 16 bytes (128 bits) is the size of a 128-bit AES key/nonce.
+bench!(aes128, 128 / 8);
+
+// 32 bytes (256 bits) is the seed size used for rand::thread_rng
+// and the `random` value in a ClientHello/ServerHello for TLS.
+// This is also the size of a 256-bit AES/HMAC/P-256/Curve25519 key
+// and/or nonce.
+bench!(p256, 256 / 8);
+
+// A P-384/HMAC-384 key and/or nonce.
+bench!(p384, 384 / 8);
+
+// Initializing larger buffers is not the primary use case of this library, as
+// this should normally be done by a userspace CSPRNG. However, we have a test
+// here to see the effects of a lower (amortized) syscall overhead.
+bench!(page, 4096);
diff --git a/vendor/getrandom/src/3ds.rs b/vendor/getrandom/src/3ds.rs
new file mode 100644
index 0000000..87a32a1
--- /dev/null
+++ b/vendor/getrandom/src/3ds.rs
@@ -0,0 +1,18 @@
+// Copyright 2021 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for Nintendo 3DS
+use crate::util_libc::sys_fill_exact;
+use crate::Error;
+use core::mem::MaybeUninit;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    sys_fill_exact(dest, |buf| unsafe {
+        libc::getrandom(buf.as_mut_ptr() as *mut libc::c_void, buf.len(), 0)
+    })
+}
diff --git a/vendor/getrandom/src/apple-other.rs b/vendor/getrandom/src/apple-other.rs
new file mode 100644
index 0000000..8f90485
--- /dev/null
+++ b/vendor/getrandom/src/apple-other.rs
@@ -0,0 +1,27 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for iOS
+use crate::Error;
+use core::{ffi::c_void, mem::MaybeUninit, ptr::null};
+
+#[link(name = "Security", kind = "framework")]
+extern "C" {
+    fn SecRandomCopyBytes(rnd: *const c_void, count: usize, bytes: *mut u8) -> i32;
+}
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    // Apple's documentation guarantees kSecRandomDefault is a synonym for NULL.
+    let ret = unsafe { SecRandomCopyBytes(null(), dest.len(), dest.as_mut_ptr() as *mut u8) };
+    // errSecSuccess (from SecBase.h) is always zero.
+    if ret != 0 {
+        Err(Error::IOS_SEC_RANDOM)
+    } else {
+        Ok(())
+    }
+}
diff --git a/vendor/getrandom/src/bsd_arandom.rs b/vendor/getrandom/src/bsd_arandom.rs
new file mode 100644
index 0000000..5314c48
--- /dev/null
+++ b/vendor/getrandom/src/bsd_arandom.rs
@@ -0,0 +1,54 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//!
Implementation for FreeBSD and NetBSD +use crate::{ + util_libc::{sys_fill_exact, Weak}, + Error, +}; +use core::{mem::MaybeUninit, ptr}; + +fn kern_arnd(buf: &mut [MaybeUninit]) -> libc::ssize_t { + static MIB: [libc::c_int; 2] = [libc::CTL_KERN, libc::KERN_ARND]; + let mut len = buf.len(); + let ret = unsafe { + libc::sysctl( + MIB.as_ptr(), + MIB.len() as libc::c_uint, + buf.as_mut_ptr() as *mut _, + &mut len, + ptr::null(), + 0, + ) + }; + if ret == -1 { + -1 + } else { + len as libc::ssize_t + } +} + +type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t; + +pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // getrandom(2) was introduced in FreeBSD 12.0 and NetBSD 10.0 + static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") }; + if let Some(fptr) = GETRANDOM.ptr() { + let func: GetRandomFn = unsafe { core::mem::transmute(fptr) }; + return sys_fill_exact(dest, |buf| unsafe { + func(buf.as_mut_ptr() as *mut u8, buf.len(), 0) + }); + } + + // Both FreeBSD and NetBSD will only return up to 256 bytes at a time, and + // older NetBSD kernels will fail on longer buffers. + for chunk in dest.chunks_mut(256) { + sys_fill_exact(chunk, kern_arnd)? + } + Ok(()) +} diff --git a/vendor/getrandom/src/custom.rs b/vendor/getrandom/src/custom.rs new file mode 100644 index 0000000..66e4256 --- /dev/null +++ b/vendor/getrandom/src/custom.rs @@ -0,0 +1,114 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! An implementation which calls out to an externally defined function. +use crate::{util::uninit_slice_fill_zero, Error}; +use core::{mem::MaybeUninit, num::NonZeroU32}; + +/// Register a function to be invoked by `getrandom` on unsupported targets. +/// +/// ## Writing a custom `getrandom` implementation +/// +/// The function to register must have the same signature as +/// [`getrandom::getrandom`](crate::getrandom). The function can be defined +/// wherever you want, either in root crate or a dependent crate. +/// +/// For example, if we wanted a `failure-getrandom` crate containing an +/// implementation that always fails, we would first depend on `getrandom` +/// (for the [`Error`] type) in `failure-getrandom/Cargo.toml`: +/// ```toml +/// [dependencies] +/// getrandom = "0.2" +/// ``` +/// Note that the crate containing this function does **not** need to enable the +/// `"custom"` Cargo feature. +/// +/// Next, in `failure-getrandom/src/lib.rs`, we define our function: +/// ```rust +/// use core::num::NonZeroU32; +/// use getrandom::Error; +/// +/// // Some application-specific error code +/// const MY_CUSTOM_ERROR_CODE: u32 = Error::CUSTOM_START + 42; +/// pub fn always_fail(buf: &mut [u8]) -> Result<(), Error> { +/// let code = NonZeroU32::new(MY_CUSTOM_ERROR_CODE).unwrap(); +/// Err(Error::from(code)) +/// } +/// ``` +/// +/// ## Registering a custom `getrandom` implementation +/// +/// Functions can only be registered in the root binary crate. Attempting to +/// register a function in a non-root crate will result in a linker error. 
+/// This is similar to +/// [`#[panic_handler]`](https://doc.rust-lang.org/nomicon/panic-handler.html) or +/// [`#[global_allocator]`](https://doc.rust-lang.org/edition-guide/rust-2018/platform-and-target-support/global-allocators.html), +/// where helper crates define handlers/allocators but only the binary crate +/// actually _uses_ the functionality. +/// +/// To register the function, we first depend on `failure-getrandom` _and_ +/// `getrandom` in `Cargo.toml`: +/// ```toml +/// [dependencies] +/// failure-getrandom = "0.1" +/// getrandom = { version = "0.2", features = ["custom"] } +/// ``` +/// +/// Then, we register the function in `src/main.rs`: +/// ```rust +/// # mod failure_getrandom { pub fn always_fail(_: &mut [u8]) -> Result<(), getrandom::Error> { unimplemented!() } } +/// use failure_getrandom::always_fail; +/// use getrandom::register_custom_getrandom; +/// +/// register_custom_getrandom!(always_fail); +/// ``` +/// +/// Now any user of `getrandom` (direct or indirect) on this target will use the +/// registered function. As noted in the +/// [top-level documentation](index.html#custom-implementations) this +/// registration only has an effect on unsupported targets. +#[macro_export] +#[cfg_attr(docsrs, doc(cfg(feature = "custom")))] +macro_rules! register_custom_getrandom { + ($path:path) => { + // TODO(MSRV 1.37): change to unnamed block + const __GETRANDOM_INTERNAL: () = { + // We use Rust ABI to be safe against potential panics in the passed function. + #[no_mangle] + unsafe fn __getrandom_custom(dest: *mut u8, len: usize) -> u32 { + // Make sure the passed function has the type of getrandom::getrandom + type F = fn(&mut [u8]) -> ::core::result::Result<(), $crate::Error>; + let _: F = $crate::getrandom; + let f: F = $path; + let slice = ::core::slice::from_raw_parts_mut(dest, len); + match f(slice) { + Ok(()) => 0, + Err(e) => e.code().get(), + } + } + }; + }; +} + +#[allow(dead_code)] +pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + extern "Rust" { + fn __getrandom_custom(dest: *mut u8, len: usize) -> u32; + } + // Previously we always passed a valid, initialized slice to + // `__getrandom_custom`. Ensure `dest` has been initialized for backward + // compatibility with implementations that rely on that (e.g. Rust + // implementations that construct a `&mut [u8]` slice from `dest` and + // `len`). + let dest = uninit_slice_fill_zero(dest); + let ret = unsafe { __getrandom_custom(dest.as_mut_ptr(), dest.len()) }; + match NonZeroU32::new(ret) { + None => Ok(()), + Some(code) => Err(Error::from(code)), + } +} diff --git a/vendor/getrandom/src/dragonfly.rs b/vendor/getrandom/src/dragonfly.rs new file mode 100644 index 0000000..d3ef00a --- /dev/null +++ b/vendor/getrandom/src/dragonfly.rs @@ -0,0 +1,30 @@ +// Copyright 2021 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! 
Implementation for DragonFly BSD +use crate::{ + use_file, + util_libc::{sys_fill_exact, Weak}, + Error, +}; +use core::mem::MaybeUninit; + +pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") }; + type GetRandomFn = unsafe extern "C" fn(*mut u8, libc::size_t, libc::c_uint) -> libc::ssize_t; + + // getrandom(2) was introduced in DragonflyBSD 5.7 + if let Some(fptr) = GETRANDOM.ptr() { + let func: GetRandomFn = unsafe { core::mem::transmute(fptr) }; + return sys_fill_exact(dest, |buf| unsafe { + func(buf.as_mut_ptr() as *mut u8, buf.len(), 0) + }); + } else { + use_file::getrandom_inner(dest) + } +} diff --git a/vendor/getrandom/src/emscripten.rs b/vendor/getrandom/src/emscripten.rs new file mode 100644 index 0000000..30221c6 --- /dev/null +++ b/vendor/getrandom/src/emscripten.rs @@ -0,0 +1,13 @@ +//! Implementation for Emscripten +use crate::{util_libc::last_os_error, Error}; +use core::mem::MaybeUninit; + +pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + // Emscripten 2.0.5 added getentropy, so we can use it unconditionally. + // Unlike other getentropy implementations, there is no max buffer length. + let ret = unsafe { libc::getentropy(dest.as_mut_ptr() as *mut libc::c_void, dest.len()) }; + if ret < 0 { + return Err(last_os_error()); + } + Ok(()) +} diff --git a/vendor/getrandom/src/error.rs b/vendor/getrandom/src/error.rs new file mode 100644 index 0000000..ab39a3c --- /dev/null +++ b/vendor/getrandom/src/error.rs @@ -0,0 +1,191 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +use core::{fmt, num::NonZeroU32}; + +/// A small and `no_std` compatible error type +/// +/// The [`Error::raw_os_error()`] will indicate if the error is from the OS, and +/// if so, which error code the OS gave the application. If such an error is +/// encountered, please consult with your system documentation. +/// +/// Internally this type is a NonZeroU32, with certain values reserved for +/// certain purposes, see [`Error::INTERNAL_START`] and [`Error::CUSTOM_START`]. +/// +/// *If this crate's `"std"` Cargo feature is enabled*, then: +/// - [`getrandom::Error`][Error] implements +/// [`std::error::Error`](https://doc.rust-lang.org/std/error/trait.Error.html) +/// - [`std::io::Error`](https://doc.rust-lang.org/std/io/struct.Error.html) implements +/// [`From`](https://doc.rust-lang.org/std/convert/trait.From.html). +#[derive(Copy, Clone, Eq, PartialEq)] +pub struct Error(NonZeroU32); + +const fn internal_error(n: u16) -> Error { + // SAFETY: code > 0 as INTERNAL_START > 0 and adding n won't overflow a u32. + let code = Error::INTERNAL_START + (n as u32); + Error(unsafe { NonZeroU32::new_unchecked(code) }) +} + +impl Error { + /// This target/platform is not supported by `getrandom`. + pub const UNSUPPORTED: Error = internal_error(0); + /// The platform-specific `errno` returned a non-positive value. + pub const ERRNO_NOT_POSITIVE: Error = internal_error(1); + /// Call to iOS [`SecRandomCopyBytes`](https://developer.apple.com/documentation/security/1399291-secrandomcopybytes) failed. + pub const IOS_SEC_RANDOM: Error = internal_error(3); + /// Call to Windows [`RtlGenRandom`](https://docs.microsoft.com/en-us/windows/win32/api/ntsecapi/nf-ntsecapi-rtlgenrandom) failed. 
+ pub const WINDOWS_RTL_GEN_RANDOM: Error = internal_error(4); + /// RDRAND instruction failed due to a hardware issue. + pub const FAILED_RDRAND: Error = internal_error(5); + /// RDRAND instruction unsupported on this target. + pub const NO_RDRAND: Error = internal_error(6); + /// The environment does not support the Web Crypto API. + pub const WEB_CRYPTO: Error = internal_error(7); + /// Calling Web Crypto API `crypto.getRandomValues` failed. + pub const WEB_GET_RANDOM_VALUES: Error = internal_error(8); + /// On VxWorks, call to `randSecure` failed (random number generator is not yet initialized). + pub const VXWORKS_RAND_SECURE: Error = internal_error(11); + /// Node.js does not have the `crypto` CommonJS module. + pub const NODE_CRYPTO: Error = internal_error(12); + /// Calling Node.js function `crypto.randomFillSync` failed. + pub const NODE_RANDOM_FILL_SYNC: Error = internal_error(13); + /// Called from an ES module on Node.js. This is unsupported, see: + /// . + pub const NODE_ES_MODULE: Error = internal_error(14); + + /// Codes below this point represent OS Errors (i.e. positive i32 values). + /// Codes at or above this point, but below [`Error::CUSTOM_START`] are + /// reserved for use by the `rand` and `getrandom` crates. + pub const INTERNAL_START: u32 = 1 << 31; + + /// Codes at or above this point can be used by users to define their own + /// custom errors. + pub const CUSTOM_START: u32 = (1 << 31) + (1 << 30); + + /// Extract the raw OS error code (if this error came from the OS) + /// + /// This method is identical to [`std::io::Error::raw_os_error()`][1], except + /// that it works in `no_std` contexts. If this method returns `None`, the + /// error value can still be formatted via the `Display` implementation. + /// + /// [1]: https://doc.rust-lang.org/std/io/struct.Error.html#method.raw_os_error + #[inline] + pub fn raw_os_error(self) -> Option { + if self.0.get() < Self::INTERNAL_START { + match () { + #[cfg(target_os = "solid_asp3")] + // On SOLID, negate the error code again to obtain the original + // error code. + () => Some(-(self.0.get() as i32)), + #[cfg(not(target_os = "solid_asp3"))] + () => Some(self.0.get() as i32), + } + } else { + None + } + } + + /// Extract the bare error code. + /// + /// This code can either come from the underlying OS, or be a custom error. + /// Use [`Error::raw_os_error()`] to disambiguate. + #[inline] + pub const fn code(self) -> NonZeroU32 { + self.0 + } +} + +cfg_if! 
{
+    if #[cfg(unix)] {
+        fn os_err(errno: i32, buf: &mut [u8]) -> Option<&str> {
+            let buf_ptr = buf.as_mut_ptr() as *mut libc::c_char;
+            if unsafe { libc::strerror_r(errno, buf_ptr, buf.len()) } != 0 {
+                return None;
+            }
+
+            // Take up to trailing null byte
+            let n = buf.len();
+            let idx = buf.iter().position(|&b| b == 0).unwrap_or(n);
+            core::str::from_utf8(&buf[..idx]).ok()
+        }
+    } else {
+        fn os_err(_errno: i32, _buf: &mut [u8]) -> Option<&str> {
+            None
+        }
+    }
+}
+
+impl fmt::Debug for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut dbg = f.debug_struct("Error");
+        if let Some(errno) = self.raw_os_error() {
+            dbg.field("os_error", &errno);
+            let mut buf = [0u8; 128];
+            if let Some(err) = os_err(errno, &mut buf) {
+                dbg.field("description", &err);
+            }
+        } else if let Some(desc) = internal_desc(*self) {
+            dbg.field("internal_code", &self.0.get());
+            dbg.field("description", &desc);
+        } else {
+            dbg.field("unknown_code", &self.0.get());
+        }
+        dbg.finish()
+    }
+}
+
+impl fmt::Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let Some(errno) = self.raw_os_error() {
+            let mut buf = [0u8; 128];
+            match os_err(errno, &mut buf) {
+                Some(err) => err.fmt(f),
+                None => write!(f, "OS Error: {}", errno),
+            }
+        } else if let Some(desc) = internal_desc(*self) {
+            f.write_str(desc)
+        } else {
+            write!(f, "Unknown Error: {}", self.0.get())
+        }
+    }
+}
+
+impl From<NonZeroU32> for Error {
+    fn from(code: NonZeroU32) -> Self {
+        Self(code)
+    }
+}
+
+fn internal_desc(error: Error) -> Option<&'static str> {
+    match error {
+        Error::UNSUPPORTED => Some("getrandom: this target is not supported"),
+        Error::ERRNO_NOT_POSITIVE => Some("errno: did not return a positive value"),
+        Error::IOS_SEC_RANDOM => Some("SecRandomCopyBytes: iOS Security framework failure"),
+        Error::WINDOWS_RTL_GEN_RANDOM => Some("RtlGenRandom: Windows system function failure"),
+        Error::FAILED_RDRAND => Some("RDRAND: failed multiple times: CPU issue likely"),
+        Error::NO_RDRAND => Some("RDRAND: instruction not supported"),
+        Error::WEB_CRYPTO => Some("Web Crypto API is unavailable"),
+        Error::WEB_GET_RANDOM_VALUES => Some("Calling Web API crypto.getRandomValues failed"),
+        Error::VXWORKS_RAND_SECURE => Some("randSecure: VxWorks RNG module is not initialized"),
+        Error::NODE_CRYPTO => Some("Node.js crypto CommonJS module is unavailable"),
+        Error::NODE_RANDOM_FILL_SYNC => Some("Calling Node.js API crypto.randomFillSync failed"),
+        Error::NODE_ES_MODULE => Some("Node.js ES modules are not directly supported, see https://docs.rs/getrandom#nodejs-es-module-support"),
+        _ => None,
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::Error;
+    use core::mem::size_of;
+
+    #[test]
+    fn test_size() {
+        assert_eq!(size_of::<Error>(), 4);
+        assert_eq!(size_of::<Result<(), Error>>(), 4);
+    }
+}
diff --git a/vendor/getrandom/src/error_impls.rs b/vendor/getrandom/src/error_impls.rs
new file mode 100644
index 0000000..61f46d2
--- /dev/null
+++ b/vendor/getrandom/src/error_impls.rs
@@ -0,0 +1,24 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
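+
+// A rough usage sketch for the error API defined in error.rs above (the
+// constant name MY_CODE below is hypothetical, not part of this crate):
+//
+//     use core::num::NonZeroU32;
+//     use getrandom::Error;
+//
+//     const MY_CODE: u32 = Error::CUSTOM_START + 42;
+//     let err = Error::from(NonZeroU32::new(MY_CODE).unwrap());
+//     assert_eq!(err.code().get(), MY_CODE);
+//     assert!(err.raw_os_error().is_none()); // custom codes are never OS errors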
+#![cfg_attr(docsrs, doc(cfg(feature = "std")))]
+extern crate std;
+
+use crate::Error;
+use core::convert::From;
+use std::io;
+
+impl From<Error> for io::Error {
+    fn from(err: Error) -> Self {
+        match err.raw_os_error() {
+            Some(errno) => io::Error::from_raw_os_error(errno),
+            None => io::Error::new(io::ErrorKind::Other, err),
+        }
+    }
+}
+
+impl std::error::Error for Error {}
diff --git a/vendor/getrandom/src/espidf.rs b/vendor/getrandom/src/espidf.rs
new file mode 100644
index 0000000..d074dc4
--- /dev/null
+++ b/vendor/getrandom/src/espidf.rs
@@ -0,0 +1,26 @@
+// Copyright 2021 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for ESP-IDF
+use crate::Error;
+use core::{ffi::c_void, mem::MaybeUninit};
+
+extern "C" {
+    fn esp_fill_random(buf: *mut c_void, len: usize) -> u32;
+}
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    // Note that NOT enabling WiFi, BT, or the voltage noise entropy source (via `bootloader_random_enable`)
+    // will cause ESP-IDF to return pseudo-random numbers based on the voltage noise entropy, after the initial boot process:
+    // https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/system/random.html
+    //
+    // However, tracking whether any of these entropy sources is enabled is far too difficult to implement here.
+    unsafe { esp_fill_random(dest.as_mut_ptr().cast(), dest.len()) };
+
+    Ok(())
+}
diff --git a/vendor/getrandom/src/fuchsia.rs b/vendor/getrandom/src/fuchsia.rs
new file mode 100644
index 0000000..5a135f3
--- /dev/null
+++ b/vendor/getrandom/src/fuchsia.rs
@@ -0,0 +1,21 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for Fuchsia Zircon
+use crate::Error;
+use core::mem::MaybeUninit;
+
+#[link(name = "zircon")]
+extern "C" {
+    fn zx_cprng_draw(buffer: *mut u8, length: usize);
+}
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    unsafe { zx_cprng_draw(dest.as_mut_ptr() as *mut u8, dest.len()) }
+    Ok(())
+}
diff --git a/vendor/getrandom/src/hermit.rs b/vendor/getrandom/src/hermit.rs
new file mode 100644
index 0000000..570b03d
--- /dev/null
+++ b/vendor/getrandom/src/hermit.rs
@@ -0,0 +1,21 @@
+use crate::Error;
+use core::{cmp::min, mem::MaybeUninit, num::NonZeroU32};
+
+extern "C" {
+    fn sys_read_entropy(buffer: *mut u8, length: usize, flags: u32) -> isize;
+}
+
+pub fn getrandom_inner(mut dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    while !dest.is_empty() {
+        let res = unsafe { sys_read_entropy(dest.as_mut_ptr() as *mut u8, dest.len(), 0) };
+        if res < 0 {
+            // SAFETY: all Hermit error codes use i32 under the hood:
+            // https://github.com/hermitcore/libhermit-rs/blob/master/src/errno.rs
+            let code = unsafe { NonZeroU32::new_unchecked((-res) as u32) };
+            return Err(code.into());
+        }
+        let len = min(res as usize, dest.len());
+        dest = &mut dest[len..];
+    }
+    Ok(())
+}
diff --git a/vendor/getrandom/src/hurd.rs b/vendor/getrandom/src/hurd.rs
new file mode 100644
index 0000000..842b9bc
--- /dev/null
+++ b/vendor/getrandom/src/hurd.rs
@@ -0,0 +1,18 @@
+// Copyright 2021 Developers of the Rand project.
+// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Implementation for GNU/Hurd +use crate::util_libc::sys_fill_exact; +use crate::Error; +use core::mem::MaybeUninit; + +pub fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + sys_fill_exact(dest, |buf| unsafe { + libc::getrandom(buf.as_mut_ptr() as *mut libc::c_void, buf.len(), 0) + }) +} diff --git a/vendor/getrandom/src/js.rs b/vendor/getrandom/src/js.rs new file mode 100644 index 0000000..d031282 --- /dev/null +++ b/vendor/getrandom/src/js.rs @@ -0,0 +1,161 @@ +// Copyright 2018 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +use crate::Error; + +extern crate std; +use std::{mem::MaybeUninit, thread_local}; + +use js_sys::{global, Function, Uint8Array}; +use wasm_bindgen::{prelude::wasm_bindgen, JsCast, JsValue}; + +// Size of our temporary Uint8Array buffer used with WebCrypto methods +// Maximum is 65536 bytes see https://developer.mozilla.org/en-US/docs/Web/API/Crypto/getRandomValues +const WEB_CRYPTO_BUFFER_SIZE: usize = 256; +// Node.js's crypto.randomFillSync requires the size to be less than 2**31. +const NODE_MAX_BUFFER_SIZE: usize = (1 << 31) - 1; + +enum RngSource { + Node(NodeCrypto), + Web(WebCrypto, Uint8Array), +} + +// JsValues are always per-thread, so we initialize RngSource for each thread. +// See: https://github.com/rustwasm/wasm-bindgen/pull/955 +thread_local!( + static RNG_SOURCE: Result = getrandom_init(); +); + +pub(crate) fn getrandom_inner(dest: &mut [MaybeUninit]) -> Result<(), Error> { + RNG_SOURCE.with(|result| { + let source = result.as_ref().map_err(|&e| e)?; + + match source { + RngSource::Node(n) => { + for chunk in dest.chunks_mut(NODE_MAX_BUFFER_SIZE) { + // SAFETY: chunk is never used directly, the memory is only + // modified via the Uint8Array view, which is passed + // directly to JavaScript. Also, crypto.randomFillSync does + // not resize the buffer. We know the length is less than + // u32::MAX because of the chunking above. + // Note that this uses the fact that JavaScript doesn't + // have a notion of "uninitialized memory", this is purely + // a Rust/C/C++ concept. + let res = n.random_fill_sync(unsafe { + Uint8Array::view_mut_raw(chunk.as_mut_ptr() as *mut u8, chunk.len()) + }); + if res.is_err() { + return Err(Error::NODE_RANDOM_FILL_SYNC); + } + } + } + RngSource::Web(crypto, buf) => { + // getRandomValues does not work with all types of WASM memory, + // so we initially write to browser memory to avoid exceptions. + for chunk in dest.chunks_mut(WEB_CRYPTO_BUFFER_SIZE) { + // The chunk can be smaller than buf's length, so we call to + // JS to create a smaller view of buf without allocation. + let sub_buf = buf.subarray(0, chunk.len() as u32); + + if crypto.get_random_values(&sub_buf).is_err() { + return Err(Error::WEB_GET_RANDOM_VALUES); + } + + // SAFETY: `sub_buf`'s length is the same length as `chunk` + unsafe { sub_buf.raw_copy_to_ptr(chunk.as_mut_ptr() as *mut u8) }; + } + } + }; + Ok(()) + }) +} + +fn getrandom_init() -> Result { + let global: Global = global().unchecked_into(); + + // Get the Web Crypto interface if we are in a browser, Web Worker, Deno, + // or another environment that supports the Web Cryptography API. 
This + // also allows for user-provided polyfills in unsupported environments. + let crypto = match global.crypto() { + // Standard Web Crypto interface + c if c.is_object() => c, + // Node.js CommonJS Crypto module + _ if is_node(&global) => { + // If module.require isn't a valid function, we are in an ES module. + match Module::require_fn().and_then(JsCast::dyn_into::) { + Ok(require_fn) => match require_fn.call1(&global, &JsValue::from_str("crypto")) { + Ok(n) => return Ok(RngSource::Node(n.unchecked_into())), + Err(_) => return Err(Error::NODE_CRYPTO), + }, + Err(_) => return Err(Error::NODE_ES_MODULE), + } + } + // IE 11 Workaround + _ => match global.ms_crypto() { + c if c.is_object() => c, + _ => return Err(Error::WEB_CRYPTO), + }, + }; + + let buf = Uint8Array::new_with_length(WEB_CRYPTO_BUFFER_SIZE as u32); + Ok(RngSource::Web(crypto, buf)) +} + +// Taken from https://www.npmjs.com/package/browser-or-node +fn is_node(global: &Global) -> bool { + let process = global.process(); + if process.is_object() { + let versions = process.versions(); + if versions.is_object() { + return versions.node().is_string(); + } + } + false +} + +#[wasm_bindgen] +extern "C" { + // Return type of js_sys::global() + type Global; + + // Web Crypto API: Crypto interface (https://www.w3.org/TR/WebCryptoAPI/) + type WebCrypto; + // Getters for the WebCrypto API + #[wasm_bindgen(method, getter)] + fn crypto(this: &Global) -> WebCrypto; + #[wasm_bindgen(method, getter, js_name = msCrypto)] + fn ms_crypto(this: &Global) -> WebCrypto; + // Crypto.getRandomValues() + #[wasm_bindgen(method, js_name = getRandomValues, catch)] + fn get_random_values(this: &WebCrypto, buf: &Uint8Array) -> Result<(), JsValue>; + + // Node JS crypto module (https://nodejs.org/api/crypto.html) + type NodeCrypto; + // crypto.randomFillSync() + #[wasm_bindgen(method, js_name = randomFillSync, catch)] + fn random_fill_sync(this: &NodeCrypto, buf: Uint8Array) -> Result<(), JsValue>; + + // Ideally, we would just use `fn require(s: &str)` here. However, doing + // this causes a Webpack warning. So we instead return the function itself + // and manually invoke it using call1. This also lets us to check that the + // function actually exists, allowing for better error messages. See: + // https://github.com/rust-random/getrandom/issues/224 + // https://github.com/rust-random/getrandom/issues/256 + type Module; + #[wasm_bindgen(getter, static_method_of = Module, js_class = module, js_name = require, catch)] + fn require_fn() -> Result; + + // Node JS process Object (https://nodejs.org/api/process.html) + #[wasm_bindgen(method, getter)] + fn process(this: &Global) -> Process; + type Process; + #[wasm_bindgen(method, getter)] + fn versions(this: &Process) -> Versions; + type Versions; + #[wasm_bindgen(method, getter)] + fn node(this: &Versions) -> JsValue; +} diff --git a/vendor/getrandom/src/lib.rs b/vendor/getrandom/src/lib.rs new file mode 100644 index 0000000..10cc227 --- /dev/null +++ b/vendor/getrandom/src/lib.rs @@ -0,0 +1,351 @@ +// Copyright 2019 Developers of the Rand project. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Interface to the operating system's random number generator. +//! +//! # Supported targets +//! +//! | Target | Target Triple | Implementation +//! | ----------------- | ------------------ | -------------- +//! 
| Linux, Android | `*‑linux‑*` | [`getrandom`][1] system call if available, otherwise [`/dev/urandom`][2] after successfully polling `/dev/random` +//! | Windows | `*‑windows‑*` | [`BCryptGenRandom`] +//! | macOS | `*‑apple‑darwin` | [`getentropy`][3] if available, otherwise [`/dev/urandom`][4] (identical to `/dev/random`) +//! | iOS, tvOS, watchOS | `*‑apple‑ios`, `*-apple-tvos`, `*-apple-watchos` | [`SecRandomCopyBytes`] +//! | FreeBSD | `*‑freebsd` | [`getrandom`][5] if available, otherwise [`kern.arandom`][6] +//! | OpenBSD | `*‑openbsd` | [`getentropy`][7] +//! | NetBSD | `*‑netbsd` | [`getrandom`][16] if available, otherwise [`kern.arandom`][8] +//! | Dragonfly BSD | `*‑dragonfly` | [`getrandom`][9] if available, otherwise [`/dev/urandom`][10] (identical to `/dev/random`) +//! | Solaris, illumos | `*‑solaris`, `*‑illumos` | [`getrandom`][11] if available, otherwise [`/dev/random`][12] +//! | Fuchsia OS | `*‑fuchsia` | [`cprng_draw`] +//! | Redox | `*‑redox` | `/dev/urandom` +//! | Haiku | `*‑haiku` | `/dev/urandom` (identical to `/dev/random`) +//! | Hermit | `*-hermit` | [`sys_read_entropy`] +//! | Hurd | `*-hurd-*` | [`getrandom`][17] +//! | SGX | `x86_64‑*‑sgx` | [`RDRAND`] +//! | VxWorks | `*‑wrs‑vxworks‑*` | `randABytes` after checking entropy pool initialization with `randSecure` +//! | ESP-IDF | `*‑espidf` | [`esp_fill_random`] +//! | Emscripten | `*‑emscripten` | [`getentropy`][13] +//! | WASI | `wasm32‑wasi` | [`random_get`] +//! | Web Browser and Node.js | `wasm*‑*‑unknown` | [`Crypto.getRandomValues`] if available, then [`crypto.randomFillSync`] if on Node.js, see [WebAssembly support] +//! | SOLID | `*-kmc-solid_*` | `SOLID_RNG_SampleRandomBytes` +//! | Nintendo 3DS | `armv6k-nintendo-3ds` | [`getrandom`][1] +//! | PS Vita | `armv7-sony-vita-newlibeabihf` | [`getentropy`][13] +//! | QNX Neutrino | `*‑nto-qnx*` | [`/dev/urandom`][14] (identical to `/dev/random`) +//! | AIX | `*-ibm-aix` | [`/dev/urandom`][15] +//! +//! There is no blanket implementation on `unix` targets that reads from +//! `/dev/urandom`. This ensures all supported targets are using the recommended +//! interface and respect maximum buffer sizes. +//! +//! Pull Requests that add support for new targets to `getrandom` are always welcome. +//! +//! ## Unsupported targets +//! +//! By default, `getrandom` will not compile on unsupported targets, but certain +//! features allow a user to select a "fallback" implementation if no supported +//! implementation exists. +//! +//! All of the below mechanisms only affect unsupported +//! targets. Supported targets will _always_ use their supported implementations. +//! This prevents a crate from overriding a secure source of randomness +//! (either accidentally or intentionally). +//! +//! ### RDRAND on x86 +//! +//! *If the `rdrand` Cargo feature is enabled*, `getrandom` will fallback to using +//! the [`RDRAND`] instruction to get randomness on `no_std` `x86`/`x86_64` +//! targets. This feature has no effect on other CPU architectures. +//! +//! ### WebAssembly support +//! +//! This crate fully supports the +//! [`wasm32-wasi`](https://github.com/CraneStation/wasi) and +//! [`wasm32-unknown-emscripten`](https://www.hellorust.com/setup/emscripten/) +//! targets. However, the `wasm32-unknown-unknown` target (i.e. the target used +//! by `wasm-pack`) is not automatically +//! supported since, from the target name alone, we cannot deduce which +//! JavaScript interface is in use (or if JavaScript is available at all). +//! +//! 
Instead, *if the `js` Cargo feature is enabled*, this crate will assume +//! that you are building for an environment containing JavaScript, and will +//! call the appropriate methods. Both web browser (main window and Web Workers) +//! and Node.js environments are supported, invoking the methods +//! [described above](#supported-targets) using the [`wasm-bindgen`] toolchain. +//! +//! To enable the `js` Cargo feature, add the following to the `dependencies` +//! section in your `Cargo.toml` file: +//! ```toml +//! [dependencies] +//! getrandom = { version = "0.2", features = ["js"] } +//! ``` +//! +//! This can be done even if `getrandom` is not a direct dependency. Cargo +//! allows crates to enable features for indirect dependencies. +//! +//! This feature should only be enabled for binary, test, or benchmark crates. +//! Library crates should generally not enable this feature, leaving such a +//! decision to *users* of their library. Also, libraries should not introduce +//! their own `js` features *just* to enable `getrandom`'s `js` feature. +//! +//! This feature has no effect on targets other than `wasm32-unknown-unknown`. +//! +//! #### Node.js ES module support +//! +//! Node.js supports both [CommonJS modules] and [ES modules]. Due to +//! limitations in wasm-bindgen's [`module`] support, we cannot directly +//! support ES Modules running on Node.js. However, on Node v15 and later, the +//! module author can add a simple shim to support the Web Cryptography API: +//! ```js +//! import { webcrypto } from 'node:crypto' +//! globalThis.crypto = webcrypto +//! ``` +//! This crate will then use the provided `webcrypto` implementation. +//! +//! ### Custom implementations +//! +//! The [`register_custom_getrandom!`] macro allows a user to mark their own +//! function as the backing implementation for [`getrandom`]. See the macro's +//! documentation for more information about writing and registering your own +//! custom implementations. +//! +//! Note that registering a custom implementation only has an effect on targets +//! that would otherwise not compile. Any supported targets (including those +//! using `rdrand` and `js` Cargo features) continue using their normal +//! implementations even if a function is registered. +//! +//! ## Early boot +//! +//! Sometimes, early in the boot process, the OS has not collected enough +//! entropy to securely seed its RNG. This is especially common on virtual +//! machines, where standard "random" events are hard to come by. +//! +//! Some operating system interfaces always block until the RNG is securely +//! seeded. This can take anywhere from a few seconds to more than a minute. +//! A few (Linux, NetBSD and Solaris) offer a choice between blocking and +//! getting an error; in these cases, we always choose to block. +//! +//! On Linux (when the `getrandom` system call is not available), reading from +//! `/dev/urandom` never blocks, even when the OS hasn't collected enough +//! entropy yet. To avoid returning low-entropy bytes, we first poll +//! `/dev/random` and only switch to `/dev/urandom` once this has succeeded. +//! +//! On OpenBSD, this kind of entropy accounting isn't available, and on +//! NetBSD, blocking on it is discouraged. On these platforms, nonblocking +//! interfaces are used, even when reliable entropy may not be available. +//! On the platforms where it is used, the reliability of entropy accounting +//! itself isn't free from controversy. This library provides randomness +//! 
sourced according to the platform's best practices, but each platform has +//! its own limits on the grade of randomness it can promise in environments +//! with few sources of entropy. +//! +//! ## Error handling +//! +//! We always choose failure over returning known insecure "random" bytes. In +//! general, on supported platforms, failure is highly unlikely, though not +//! impossible. If an error does occur, then it is likely that it will occur +//! on every call to `getrandom`, hence after the first successful call one +//! can be reasonably confident that no errors will occur. +//! +//! [1]: http://man7.org/linux/man-pages/man2/getrandom.2.html +//! [2]: http://man7.org/linux/man-pages/man4/urandom.4.html +//! [3]: https://www.unix.com/man-page/mojave/2/getentropy/ +//! [4]: https://www.unix.com/man-page/mojave/4/urandom/ +//! [5]: https://www.freebsd.org/cgi/man.cgi?query=getrandom&manpath=FreeBSD+12.0-stable +//! [6]: https://www.freebsd.org/cgi/man.cgi?query=random&sektion=4 +//! [7]: https://man.openbsd.org/getentropy.2 +//! [8]: https://man.netbsd.org/sysctl.7 +//! [9]: https://leaf.dragonflybsd.org/cgi/web-man?command=getrandom +//! [10]: https://leaf.dragonflybsd.org/cgi/web-man?command=random§ion=4 +//! [11]: https://docs.oracle.com/cd/E88353_01/html/E37841/getrandom-2.html +//! [12]: https://docs.oracle.com/cd/E86824_01/html/E54777/random-7d.html +//! [13]: https://github.com/emscripten-core/emscripten/pull/12240 +//! [14]: https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.utilities/topic/r/random.html +//! [15]: https://www.ibm.com/docs/en/aix/7.3?topic=files-random-urandom-devices +//! [16]: https://man.netbsd.org/getrandom.2 +//! [17]: https://www.gnu.org/software/libc/manual/html_mono/libc.html#index-getrandom +//! +//! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom +//! [`Crypto.getRandomValues`]: https://www.w3.org/TR/WebCryptoAPI/#Crypto-method-getRandomValues +//! [`RDRAND`]: https://software.intel.com/en-us/articles/intel-digital-random-number-generator-drng-software-implementation-guide +//! [`SecRandomCopyBytes`]: https://developer.apple.com/documentation/security/1399291-secrandomcopybytes?language=objc +//! [`cprng_draw`]: https://fuchsia.dev/fuchsia-src/zircon/syscalls/cprng_draw +//! [`crypto.randomFillSync`]: https://nodejs.org/api/crypto.html#cryptorandomfillsyncbuffer-offset-size +//! [`esp_fill_random`]: https://docs.espressif.com/projects/esp-idf/en/latest/esp32/api-reference/system/random.html#_CPPv415esp_fill_randomPv6size_t +//! [`random_get`]: https://github.com/WebAssembly/WASI/blob/main/phases/snapshot/docs.md#-random_getbuf-pointeru8-buf_len-size---errno +//! [WebAssembly support]: #webassembly-support +//! [`wasm-bindgen`]: https://github.com/rustwasm/wasm-bindgen +//! [`module`]: https://rustwasm.github.io/wasm-bindgen/reference/attributes/on-js-imports/module.html +//! [CommonJS modules]: https://nodejs.org/api/modules.html +//! [ES modules]: https://nodejs.org/api/esm.html +//! 
[`sys_read_entropy`]: https://github.com/hermit-os/kernel/blob/315f58ff5efc81d9bf0618af85a59963ff55f8b1/src/syscalls/entropy.rs#L47-L55
+
+#![doc(
+    html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
+    html_favicon_url = "https://www.rust-lang.org/favicon.ico",
+    html_root_url = "https://docs.rs/getrandom/0.2.11"
+)]
+#![no_std]
+#![warn(rust_2018_idioms, unused_lifetimes, missing_docs)]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+#[macro_use]
+extern crate cfg_if;
+
+use crate::util::{slice_as_uninit_mut, slice_assume_init_mut};
+use core::mem::MaybeUninit;
+
+mod error;
+mod util;
+// To prevent a breaking change when targets are added, we always export the
+// register_custom_getrandom macro, so old Custom RNG crates continue to build.
+#[cfg(feature = "custom")]
+mod custom;
+#[cfg(feature = "std")]
+mod error_impls;
+
+pub use crate::error::Error;
+
+// System-specific implementations.
+//
+// These should all provide getrandom_inner with the signature
+// `fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error>`.
+// The function MUST fully initialize `dest` when `Ok(())` is returned.
+// The function MUST NOT ever write uninitialized bytes into `dest`,
+// regardless of what value it returns.
+cfg_if! {
+    if #[cfg(any(target_os = "haiku", target_os = "redox", target_os = "nto", target_os = "aix"))] {
+        mod util_libc;
+        #[path = "use_file.rs"] mod imp;
+    } else if #[cfg(any(target_os = "android", target_os = "linux"))] {
+        mod util_libc;
+        mod use_file;
+        #[path = "linux_android.rs"] mod imp;
+    } else if #[cfg(any(target_os = "illumos", target_os = "solaris"))] {
+        mod util_libc;
+        mod use_file;
+        #[path = "solaris_illumos.rs"] mod imp;
+    } else if #[cfg(any(target_os = "freebsd", target_os = "netbsd"))] {
+        mod util_libc;
+        #[path = "bsd_arandom.rs"] mod imp;
+    } else if #[cfg(target_os = "dragonfly")] {
+        mod util_libc;
+        mod use_file;
+        #[path = "dragonfly.rs"] mod imp;
+    } else if #[cfg(target_os = "fuchsia")] {
+        #[path = "fuchsia.rs"] mod imp;
+    } else if #[cfg(any(target_os = "ios", target_os = "watchos", target_os = "tvos"))] {
+        #[path = "apple-other.rs"] mod imp;
+    } else if #[cfg(target_os = "macos")] {
+        mod util_libc;
+        mod use_file;
+        #[path = "macos.rs"] mod imp;
+    } else if #[cfg(target_os = "openbsd")] {
+        mod util_libc;
+        #[path = "openbsd.rs"] mod imp;
+    } else if #[cfg(all(target_arch = "wasm32", target_os = "wasi"))] {
+        #[path = "wasi.rs"] mod imp;
+    } else if #[cfg(target_os = "hermit")] {
+        #[path = "hermit.rs"] mod imp;
+    } else if #[cfg(target_os = "vxworks")] {
+        mod util_libc;
+        #[path = "vxworks.rs"] mod imp;
+    } else if #[cfg(target_os = "solid_asp3")] {
+        #[path = "solid.rs"] mod imp;
+    } else if #[cfg(target_os = "espidf")] {
+        #[path = "espidf.rs"] mod imp;
+    } else if #[cfg(windows)] {
+        #[path = "windows.rs"] mod imp;
+    } else if #[cfg(all(target_os = "horizon", target_arch = "arm"))] {
+        // We check for target_arch = "arm" because the Nintendo Switch also
+        // uses Horizon OS (it is aarch64).
+        mod util_libc;
+        #[path = "3ds.rs"] mod imp;
+    } else if #[cfg(target_os = "vita")] {
+        mod util_libc;
+        #[path = "vita.rs"] mod imp;
+    } else if #[cfg(target_os = "emscripten")] {
+        mod util_libc;
+        #[path = "emscripten.rs"] mod imp;
+    } else if #[cfg(all(target_arch = "x86_64", target_env = "sgx"))] {
+        #[path = "rdrand.rs"] mod imp;
+    } else if #[cfg(all(feature = "rdrand",
+                        any(target_arch = "x86_64", target_arch = "x86")))] {
+        #[path = "rdrand.rs"] mod imp;
+    } else if #[cfg(all(feature = "js",
+                        any(target_arch = "wasm32", target_arch = "wasm64"),
+                        target_os = "unknown"))] {
+        #[path = "js.rs"] mod imp;
+    } else if #[cfg(target_os = "hurd")] {
+        mod util_libc;
+        #[path = "hurd.rs"] mod imp;
+    } else if #[cfg(feature = "custom")] {
+        use custom as imp;
+    } else if #[cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"),
+                        target_os = "unknown"))] {
+        compile_error!("the wasm*-unknown-unknown targets are not supported by \
+                        default, you may need to enable the \"js\" feature. \
+                        For more information see: \
+                        https://docs.rs/getrandom/#webassembly-support");
+    } else {
+        compile_error!("target is not supported, for more information see: \
+                        https://docs.rs/getrandom/#unsupported-targets");
+    }
+}
+
+/// Fill `dest` with random bytes from the system's preferred random number
+/// source.
+///
+/// This function returns an error on any failure, including partial reads. We
+/// make no guarantees regarding the contents of `dest` on error. If `dest` is
+/// empty, `getrandom` immediately returns success, making no calls to the
+/// underlying operating system.
+///
+/// Blocking is possible, at least during early boot; see module documentation.
+///
+/// In general, `getrandom` will be fast enough for interactive usage, though
+/// significantly slower than a user-space CSPRNG; for the latter consider
+/// [`rand::thread_rng`](https://docs.rs/rand/*/rand/fn.thread_rng.html).
+#[inline]
+pub fn getrandom(dest: &mut [u8]) -> Result<(), Error> {
+    // SAFETY: The `&mut MaybeUninit<_>` reference doesn't escape, and
+    // `getrandom_uninit` guarantees it will never de-initialize any part of
+    // `dest`.
+    getrandom_uninit(unsafe { slice_as_uninit_mut(dest) })?;
+    Ok(())
+}
+
+/// Version of the `getrandom` function which fills `dest` with random bytes
+/// and returns a mutable reference to those bytes.
+///
+/// On successful completion this function is guaranteed to return a slice
+/// which points to the same memory as `dest` and has the same length.
+/// In other words, it's safe to assume that `dest` is initialized after
+/// this function has returned `Ok`.
+///
+/// No part of `dest` will ever be de-initialized at any point, regardless
+/// of what is returned.
+///
+/// # Examples
+///
+/// ```ignore
+/// # // We ignore this test since `uninit_array` is unstable.
+/// #![feature(maybe_uninit_uninit_array)]
+/// # fn main() -> Result<(), getrandom::Error> {
+/// let mut buf = core::mem::MaybeUninit::uninit_array::<1024>();
+/// let buf: &mut [u8] = getrandom::getrandom_uninit(&mut buf)?;
+/// # Ok(()) }
+/// ```
+#[inline]
+pub fn getrandom_uninit(dest: &mut [MaybeUninit<u8>]) -> Result<&mut [u8], Error> {
+    if !dest.is_empty() {
+        imp::getrandom_inner(dest)?;
+    }
+    // SAFETY: `dest` has been fully initialized by `imp::getrandom_inner`
+    // since it returned `Ok`.
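+    // (If `dest` was empty, no backend was called, and an empty slice is
+    // trivially fully initialized, so the conversion below is still sound.)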
+    Ok(unsafe { slice_assume_init_mut(dest) })
+}
diff --git a/vendor/getrandom/src/linux_android.rs b/vendor/getrandom/src/linux_android.rs
new file mode 100644
index 0000000..e81f1e1
--- /dev/null
+++ b/vendor/getrandom/src/linux_android.rs
@@ -0,0 +1,48 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for Linux / Android
+use crate::{
+    util::LazyBool,
+    util_libc::{last_os_error, sys_fill_exact},
+    {use_file, Error},
+};
+use core::mem::MaybeUninit;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    // getrandom(2) was introduced in Linux 3.17
+    static HAS_GETRANDOM: LazyBool = LazyBool::new();
+    if HAS_GETRANDOM.unsync_init(is_getrandom_available) {
+        sys_fill_exact(dest, |buf| unsafe {
+            getrandom(buf.as_mut_ptr() as *mut libc::c_void, buf.len(), 0)
+        })
+    } else {
+        use_file::getrandom_inner(dest)
+    }
+}
+
+fn is_getrandom_available() -> bool {
+    let res = unsafe { getrandom(core::ptr::null_mut(), 0, libc::GRND_NONBLOCK) };
+    if res < 0 {
+        match last_os_error().raw_os_error() {
+            Some(libc::ENOSYS) => false, // No kernel support
+            Some(libc::EPERM) => false,  // Blocked by seccomp
+            _ => true,
+        }
+    } else {
+        true
+    }
+}
+
+unsafe fn getrandom(
+    buf: *mut libc::c_void,
+    buflen: libc::size_t,
+    flags: libc::c_uint,
+) -> libc::ssize_t {
+    libc::syscall(libc::SYS_getrandom, buf, buflen, flags) as libc::ssize_t
+}
diff --git a/vendor/getrandom/src/macos.rs b/vendor/getrandom/src/macos.rs
new file mode 100644
index 0000000..312f9b2
--- /dev/null
+++ b/vendor/getrandom/src/macos.rs
@@ -0,0 +1,36 @@
+// Copyright 2019 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for macOS
+use crate::{
+    use_file,
+    util_libc::{last_os_error, Weak},
+    Error,
+};
+use core::mem::{self, MaybeUninit};
+
+type GetEntropyFn = unsafe extern "C" fn(*mut u8, libc::size_t) -> libc::c_int;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    // getentropy(2) was added in 10.12, Rust supports 10.7+
+    static GETENTROPY: Weak = unsafe { Weak::new("getentropy\0") };
+    if let Some(fptr) = GETENTROPY.ptr() {
+        let func: GetEntropyFn = unsafe { mem::transmute(fptr) };
+        for chunk in dest.chunks_mut(256) {
+            let ret = unsafe { func(chunk.as_mut_ptr() as *mut u8, chunk.len()) };
+            if ret != 0 {
+                return Err(last_os_error());
+            }
+        }
+        Ok(())
+    } else {
+        // We fall back to reading from /dev/random instead of SecRandomCopyBytes
+        // to avoid high startup costs and linking the Security framework.
+        use_file::getrandom_inner(dest)
+    }
+}
diff --git a/vendor/getrandom/src/openbsd.rs b/vendor/getrandom/src/openbsd.rs
new file mode 100644
index 0000000..7a76f61
--- /dev/null
+++ b/vendor/getrandom/src/openbsd.rs
@@ -0,0 +1,22 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for OpenBSD
+use crate::{util_libc::last_os_error, Error};
+use core::mem::MaybeUninit;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    // getentropy(2) was added in OpenBSD 5.6, so we can use it unconditionally.
+    for chunk in dest.chunks_mut(256) {
+        let ret = unsafe { libc::getentropy(chunk.as_mut_ptr() as *mut libc::c_void, chunk.len()) };
+        if ret == -1 {
+            return Err(last_os_error());
+        }
+    }
+    Ok(())
+}
diff --git a/vendor/getrandom/src/rdrand.rs b/vendor/getrandom/src/rdrand.rs
new file mode 100644
index 0000000..69f6a5d
--- /dev/null
+++ b/vendor/getrandom/src/rdrand.rs
@@ -0,0 +1,130 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+use crate::{
+    util::{slice_as_uninit, LazyBool},
+    Error,
+};
+use core::mem::{size_of, MaybeUninit};
+
+cfg_if! {
+    if #[cfg(target_arch = "x86_64")] {
+        use core::arch::x86_64 as arch;
+        use arch::_rdrand64_step as rdrand_step;
+    } else if #[cfg(target_arch = "x86")] {
+        use core::arch::x86 as arch;
+        use arch::_rdrand32_step as rdrand_step;
+    }
+}
+
+// Recommendation from "Intel® Digital Random Number Generator (DRNG) Software
+// Implementation Guide" - Section 5.2.1 and "Intel® 64 and IA-32 Architectures
+// Software Developer’s Manual" - Volume 1 - Section 7.3.17.1.
+const RETRY_LIMIT: usize = 10;
+
+#[target_feature(enable = "rdrand")]
+unsafe fn rdrand() -> Option<usize> {
+    for _ in 0..RETRY_LIMIT {
+        let mut val = 0;
+        if rdrand_step(&mut val) == 1 {
+            return Some(val as usize);
+        }
+    }
+    None
+}
+
+// "rdrand" target feature requires "+rdrand" flag, see https://github.com/rust-lang/rust/issues/49653.
+#[cfg(all(target_env = "sgx", not(target_feature = "rdrand")))]
+compile_error!(
+    "SGX targets require 'rdrand' target feature. Enable by using -C target-feature=+rdrand."
+);
+
+// Run a small self-test to make sure we aren't repeating values
+// Adapted from Linux's test in arch/x86/kernel/cpu/rdrand.c
+// Fails with probability < 2^(-90) on 32-bit systems
+#[target_feature(enable = "rdrand")]
+unsafe fn self_test() -> bool {
+    // On AMD, RDRAND returns 0xFF...FF on failure, count it as a collision.
+    let mut prev = !0; // TODO(MSRV 1.43): Move to usize::MAX
+    let mut fails = 0;
+    for _ in 0..8 {
+        match rdrand() {
+            Some(val) if val == prev => fails += 1,
+            Some(val) => prev = val,
+            None => return false,
+        };
+    }
+    fails <= 2
+}
+
+fn is_rdrand_good() -> bool {
+    #[cfg(not(target_feature = "rdrand"))]
+    {
+        // SAFETY: All Rust x86 targets are new enough to have CPUID, and we
+        // check that leaf 1 is supported before using it.
+        let cpuid0 = unsafe { arch::__cpuid(0) };
+        if cpuid0.eax < 1 {
+            return false;
+        }
+        let cpuid1 = unsafe { arch::__cpuid(1) };
+
+        let vendor_id = [
+            cpuid0.ebx.to_le_bytes(),
+            cpuid0.edx.to_le_bytes(),
+            cpuid0.ecx.to_le_bytes(),
+        ];
+        if vendor_id == [*b"Auth", *b"enti", *b"cAMD"] {
+            let mut family = (cpuid1.eax >> 8) & 0xF;
+            if family == 0xF {
+                family += (cpuid1.eax >> 20) & 0xFF;
+            }
+            // AMD CPU families before 17h (Zen) sometimes fail to set CF when
+            // RDRAND fails after suspend. Don't use RDRAND on those families.
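+            // (Only family 17h and newer are trusted by the check below.)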
+            // See https://bugzilla.redhat.com/show_bug.cgi?id=1150286
+            if family < 0x17 {
+                return false;
+            }
+        }
+
+        const RDRAND_FLAG: u32 = 1 << 30;
+        if cpuid1.ecx & RDRAND_FLAG == 0 {
+            return false;
+        }
+    }
+
+    // SAFETY: We have already checked that rdrand is available.
+    unsafe { self_test() }
+}
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    static RDRAND_GOOD: LazyBool = LazyBool::new();
+    if !RDRAND_GOOD.unsync_init(is_rdrand_good) {
+        return Err(Error::NO_RDRAND);
+    }
+    // SAFETY: After this point, we know rdrand is supported.
+    unsafe { rdrand_exact(dest) }.ok_or(Error::FAILED_RDRAND)
+}
+
+// TODO: make this function safe when we have feature(target_feature_11)
+#[target_feature(enable = "rdrand")]
+unsafe fn rdrand_exact(dest: &mut [MaybeUninit<u8>]) -> Option<()> {
+    // We use chunks_exact_mut instead of chunks_mut as it allows almost all
+    // calls to memcpy to be elided by the compiler.
+    let mut chunks = dest.chunks_exact_mut(size_of::<usize>());
+    for chunk in chunks.by_ref() {
+        let src = rdrand()?.to_ne_bytes();
+        chunk.copy_from_slice(slice_as_uninit(&src));
+    }
+
+    let tail = chunks.into_remainder();
+    let n = tail.len();
+    if n > 0 {
+        let src = rdrand()?.to_ne_bytes();
+        tail.copy_from_slice(slice_as_uninit(&src[..n]));
+    }
+    Some(())
+}
diff --git a/vendor/getrandom/src/solaris_illumos.rs b/vendor/getrandom/src/solaris_illumos.rs
new file mode 100644
index 0000000..501c610
--- /dev/null
+++ b/vendor/getrandom/src/solaris_illumos.rs
@@ -0,0 +1,49 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for the Solaris family
+//!
+//! `/dev/random` uses the Hash_DRBG with SHA512 algorithm from NIST SP 800-90A.
+//! `/dev/urandom` uses the FIPS 186-2 algorithm, which is considered less
+//! secure. We choose to read from `/dev/random` (and use GRND_RANDOM).
+//!
+//! Solaris 11.3 and late-2018 illumos added the getrandom(2) libc function.
+//! To make sure we can compile on both Solaris and its derivatives, as well as
+//! function, we check for the existence of getrandom(2) in libc by calling
+//! libc::dlsym.
+use crate::{
+    use_file,
+    util_libc::{sys_fill_exact, Weak},
+    Error,
+};
+use core::mem::{self, MaybeUninit};
+
+static GETRANDOM: Weak = unsafe { Weak::new("getrandom\0") };
+type GetRandomFn =
+    unsafe extern "C" fn(*mut libc::c_void, libc::size_t, libc::c_uint) -> libc::ssize_t;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    if let Some(fptr) = GETRANDOM.ptr() {
+        let func: GetRandomFn = unsafe { mem::transmute(fptr) };
+        // 256 bytes is the lowest common denominator across all the Solaris
+        // derived platforms for atomically obtaining random data.
+        for chunk in dest.chunks_mut(256) {
+            sys_fill_exact(chunk, |buf| unsafe {
+                // A cast is needed for the flags as libc uses the wrong type.
+                func(
+                    buf.as_mut_ptr() as *mut libc::c_void,
+                    buf.len(),
+                    libc::GRND_RANDOM as libc::c_uint,
+                )
+            })?
+        }
+        Ok(())
+    } else {
+        use_file::getrandom_inner(dest)
+    }
+}
diff --git a/vendor/getrandom/src/solid.rs b/vendor/getrandom/src/solid.rs
new file mode 100644
index 0000000..aeccc4e
--- /dev/null
+++ b/vendor/getrandom/src/solid.rs
@@ -0,0 +1,26 @@
+// Copyright 2021 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for SOLID
+use crate::Error;
+use core::{mem::MaybeUninit, num::NonZeroU32};
+
+extern "C" {
+    pub fn SOLID_RNG_SampleRandomBytes(buffer: *mut u8, length: usize) -> i32;
+}
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    let ret = unsafe { SOLID_RNG_SampleRandomBytes(dest.as_mut_ptr() as *mut u8, dest.len()) };
+    if ret >= 0 {
+        Ok(())
+    } else {
+        // ITRON error numbers are always negative, so we negate it so that it
+        // falls in the dedicated OS error range (1..INTERNAL_START).
+        Err(NonZeroU32::new((-ret) as u32).unwrap().into())
+    }
+}
diff --git a/vendor/getrandom/src/use_file.rs b/vendor/getrandom/src/use_file.rs
new file mode 100644
index 0000000..a6ef0d2
--- /dev/null
+++ b/vendor/getrandom/src/use_file.rs
@@ -0,0 +1,138 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementations that just need to read from a file
+use crate::{
+    util::LazyUsize,
+    util_libc::{open_readonly, sys_fill_exact},
+    Error,
+};
+use core::{
+    cell::UnsafeCell,
+    mem::MaybeUninit,
+    sync::atomic::{AtomicUsize, Ordering::Relaxed},
+};
+
+// We prefer using /dev/urandom and only use /dev/random if the OS
+// documentation indicates that /dev/urandom is insecure.
+// On Solaris/Illumos, see src/solaris_illumos.rs
+// On Dragonfly, Haiku, macOS, and QNX Neutrino the devices are identical.
+#[cfg(any(target_os = "solaris", target_os = "illumos"))]
+const FILE_PATH: &str = "/dev/random\0";
+#[cfg(any(
+    target_os = "aix",
+    target_os = "android",
+    target_os = "linux",
+    target_os = "redox",
+    target_os = "dragonfly",
+    target_os = "haiku",
+    target_os = "macos",
+    target_os = "nto",
+))]
+const FILE_PATH: &str = "/dev/urandom\0";
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    let fd = get_rng_fd()?;
+    sys_fill_exact(dest, |buf| unsafe {
+        libc::read(fd, buf.as_mut_ptr() as *mut libc::c_void, buf.len())
+    })
+}
+
+// Returns the file descriptor for the device file used to retrieve random
+// bytes. The file will be opened exactly once. All subsequent calls will
+// return the same file descriptor. This file descriptor is never closed.
+fn get_rng_fd() -> Result<libc::c_int, Error> {
+    static FD: AtomicUsize = AtomicUsize::new(LazyUsize::UNINIT);
+    fn get_fd() -> Option<libc::c_int> {
+        match FD.load(Relaxed) {
+            LazyUsize::UNINIT => None,
+            val => Some(val as libc::c_int),
+        }
+    }
+
+    // Use double-checked locking to avoid acquiring the lock if possible.
+    if let Some(fd) = get_fd() {
+        return Ok(fd);
+    }
+
+    // SAFETY: We use the mutex only in this method, and we always unlock it
+    // before returning, making sure we don't violate the pthread_mutex_t API.
+    static MUTEX: Mutex = Mutex::new();
+    unsafe { MUTEX.lock() };
+    let _guard = DropGuard(|| unsafe { MUTEX.unlock() });
+
+    if let Some(fd) = get_fd() {
+        return Ok(fd);
+    }
+
+    // On Linux, /dev/urandom might return insecure values.
+    #[cfg(any(target_os = "android", target_os = "linux"))]
+    wait_until_rng_ready()?;
+
+    let fd = unsafe { open_readonly(FILE_PATH)? };
+    // The fd always fits in a usize without conflicting with UNINIT.
+    debug_assert!(fd >= 0 && (fd as usize) < LazyUsize::UNINIT);
+    FD.store(fd as usize, Relaxed);
+
+    Ok(fd)
+}
+
+// Succeeds once /dev/urandom is safe to read from
+#[cfg(any(target_os = "android", target_os = "linux"))]
+fn wait_until_rng_ready() -> Result<(), Error> {
+    // Poll /dev/random to make sure it is ok to read from /dev/urandom.
+    let fd = unsafe { open_readonly("/dev/random\0")? };
+    let mut pfd = libc::pollfd {
+        fd,
+        events: libc::POLLIN,
+        revents: 0,
+    };
+    let _guard = DropGuard(|| unsafe {
+        libc::close(fd);
+    });
+
+    loop {
+        // A negative timeout means an infinite timeout.
+        let res = unsafe { libc::poll(&mut pfd, 1, -1) };
+        if res >= 0 {
+            debug_assert_eq!(res, 1); // We only used one fd, and cannot timeout.
+            return Ok(());
+        }
+        let err = crate::util_libc::last_os_error();
+        match err.raw_os_error() {
+            Some(libc::EINTR) | Some(libc::EAGAIN) => continue,
+            _ => return Err(err),
+        }
+    }
+}
+
+struct Mutex(UnsafeCell<libc::pthread_mutex_t>);
+
+impl Mutex {
+    const fn new() -> Self {
+        Self(UnsafeCell::new(libc::PTHREAD_MUTEX_INITIALIZER))
+    }
+    unsafe fn lock(&self) {
+        let r = libc::pthread_mutex_lock(self.0.get());
+        debug_assert_eq!(r, 0);
+    }
+    unsafe fn unlock(&self) {
+        let r = libc::pthread_mutex_unlock(self.0.get());
+        debug_assert_eq!(r, 0);
+    }
+}
+
+unsafe impl Sync for Mutex {}
+
+struct DropGuard<F: FnMut()>(F);
+
+impl<F: FnMut()> Drop for DropGuard<F> {
+    fn drop(&mut self) {
+        self.0()
+    }
+}
diff --git a/vendor/getrandom/src/util.rs b/vendor/getrandom/src/util.rs
new file mode 100644
index 0000000..3162afa
--- /dev/null
+++ b/vendor/getrandom/src/util.rs
@@ -0,0 +1,101 @@
+// Copyright 2019 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(dead_code)]
+use core::{
+    mem::MaybeUninit,
+    ptr,
+    sync::atomic::{AtomicUsize, Ordering::Relaxed},
+};
+
+// This structure represents a lazily initialized static usize value. Useful
+// when it is preferable to just rerun initialization instead of locking.
+// Both unsync_init and sync_init will invoke an init() function until it
+// succeeds, then return the cached value for future calls.
+//
+// Both methods support init() "failing". If the init() method returns UNINIT,
+// that value will be returned as normal, but will not be cached.
+//
+// Users should only depend on the _value_ returned by init() functions.
+// Specifically, for the following init() function:
+//      fn init() -> usize {
+//          a();
+//          let v = b();
+//          c();
+//          v
+//      }
+// the effects of c() or writes to shared memory will not necessarily be
+// observed and additional synchronization methods will be needed.
+pub struct LazyUsize(AtomicUsize);
+
+impl LazyUsize {
+    pub const fn new() -> Self {
+        Self(AtomicUsize::new(Self::UNINIT))
+    }
+
+    // The initialization is not completed.
+    pub const UNINIT: usize = usize::max_value();
+
+    // Runs the init() function at least once, returning the value of some run
+    // of init(). Multiple callers can run their init() functions in parallel.
+    // init() should always return the same value, if it succeeds.
+    pub fn unsync_init(&self, init: impl FnOnce() -> usize) -> usize {
+        // Relaxed ordering is fine, as we only have a single atomic variable.
+        let mut val = self.0.load(Relaxed);
+        if val == Self::UNINIT {
+            val = init();
+            self.0.store(val, Relaxed);
+        }
+        val
+    }
+}
+
+// Identical to LazyUsize except with bool instead of usize.
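+// (true is stored as 1 and false as 0, so a cached value can never collide
+// with UNINIT, which is usize::MAX.)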
+pub struct LazyBool(LazyUsize);
+
+impl LazyBool {
+    pub const fn new() -> Self {
+        Self(LazyUsize::new())
+    }
+
+    pub fn unsync_init(&self, init: impl FnOnce() -> bool) -> bool {
+        self.0.unsync_init(|| init() as usize) != 0
+    }
+}
+
+/// Polyfill for `maybe_uninit_slice` feature's
+/// `MaybeUninit::slice_assume_init_mut`. Every element of `slice` must have
+/// been initialized.
+#[inline(always)]
+pub unsafe fn slice_assume_init_mut<T>(slice: &mut [MaybeUninit<T>]) -> &mut [T] {
+    // SAFETY: `MaybeUninit<T>` is guaranteed to be layout-compatible with `T`.
+    &mut *(slice as *mut [MaybeUninit<T>] as *mut [T])
+}
+
+#[inline]
+pub fn uninit_slice_fill_zero(slice: &mut [MaybeUninit<u8>]) -> &mut [u8] {
+    unsafe { ptr::write_bytes(slice.as_mut_ptr(), 0, slice.len()) };
+    unsafe { slice_assume_init_mut(slice) }
+}
+
+#[inline(always)]
+pub fn slice_as_uninit<T>(slice: &[T]) -> &[MaybeUninit<T>] {
+    // SAFETY: `MaybeUninit<T>` is guaranteed to be layout-compatible with `T`.
+    // There is no risk of writing a `MaybeUninit<T>` into the result since
+    // the result isn't mutable.
+    unsafe { &*(slice as *const [T] as *const [MaybeUninit<T>]) }
+}
+
+/// View a mutable initialized array as potentially-uninitialized.
+///
+/// This is unsafe because it allows assigning uninitialized values into
+/// `slice`, which would be undefined behavior.
+#[inline(always)]
+pub unsafe fn slice_as_uninit_mut<T>(slice: &mut [T]) -> &mut [MaybeUninit<T>] {
+    // SAFETY: `MaybeUninit<T>` is guaranteed to be layout-compatible with `T`.
+    &mut *(slice as *mut [T] as *mut [MaybeUninit<T>])
+}
diff --git a/vendor/getrandom/src/util_libc.rs b/vendor/getrandom/src/util_libc.rs
new file mode 100644
index 0000000..99bee38
--- /dev/null
+++ b/vendor/getrandom/src/util_libc.rs
@@ -0,0 +1,159 @@
+// Copyright 2019 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(dead_code)]
+use crate::Error;
+use core::{
+    cmp::min,
+    mem::MaybeUninit,
+    num::NonZeroU32,
+    ptr::NonNull,
+    sync::atomic::{fence, AtomicPtr, Ordering},
+};
+use libc::c_void;
+
+cfg_if! {
+    if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android"))] {
+        use libc::__errno as errno_location;
+    } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd", target_os = "redox"))] {
+        use libc::__errno_location as errno_location;
+    } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] {
+        use libc::___errno as errno_location;
+    } else if #[cfg(any(target_os = "macos", target_os = "freebsd"))] {
+        use libc::__error as errno_location;
+    } else if #[cfg(target_os = "haiku")] {
+        use libc::_errnop as errno_location;
+    } else if #[cfg(target_os = "nto")] {
+        use libc::__get_errno_ptr as errno_location;
+    } else if #[cfg(any(all(target_os = "horizon", target_arch = "arm"), target_os = "vita"))] {
+        extern "C" {
+            // Not provided by libc: https://github.com/rust-lang/libc/issues/1995
+            fn __errno() -> *mut libc::c_int;
+        }
+        use __errno as errno_location;
+    } else if #[cfg(target_os = "aix")] {
+        use libc::_Errno as errno_location;
+    }
+}
+
+cfg_if! {
+    if #[cfg(target_os = "vxworks")] {
+        use libc::errnoGet as get_errno;
+    } else if #[cfg(target_os = "dragonfly")] {
+        // Until rust-lang/rust#29594 is stable, we cannot get the errno value
+        // on DragonFlyBSD. So we just return an out-of-range errno.
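+        // (last_os_error() below maps any non-positive value to
+        // Error::ERRNO_NOT_POSITIVE.)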
+        unsafe fn get_errno() -> libc::c_int { -1 }
+    } else {
+        unsafe fn get_errno() -> libc::c_int { *errno_location() }
+    }
+}
+
+pub fn last_os_error() -> Error {
+    let errno = unsafe { get_errno() };
+    if errno > 0 {
+        Error::from(NonZeroU32::new(errno as u32).unwrap())
+    } else {
+        Error::ERRNO_NOT_POSITIVE
+    }
+}
+
+// Fill a buffer by repeatedly invoking a system call. The `sys_fill` function:
+//   - should return -1 and set errno on failure
+//   - should return the number of bytes written on success
+pub fn sys_fill_exact(
+    mut buf: &mut [MaybeUninit<u8>],
+    sys_fill: impl Fn(&mut [MaybeUninit<u8>]) -> libc::ssize_t,
+) -> Result<(), Error> {
+    while !buf.is_empty() {
+        let res = sys_fill(buf);
+        if res < 0 {
+            let err = last_os_error();
+            // We should try again if the call was interrupted.
+            if err.raw_os_error() != Some(libc::EINTR) {
+                return Err(err);
+            }
+        } else {
+            // We don't check for EOF (ret = 0) as the data we are reading
+            // should be an infinite stream of random bytes.
+            let len = min(res as usize, buf.len());
+            buf = &mut buf[len..];
+        }
+    }
+    Ok(())
+}
+
+// A "weak" binding to a C function that may or may not be present at runtime.
+// Used for supporting newer OS features while still building on older systems.
+// Based off of the DlsymWeak struct in libstd:
+// https://github.com/rust-lang/rust/blob/1.61.0/library/std/src/sys/unix/weak.rs#L84
+// except that the caller must manually cast self.ptr() to a function pointer.
+pub struct Weak {
+    name: &'static str,
+    addr: AtomicPtr<c_void>,
+}
+
+impl Weak {
+    // A non-null pointer value which indicates we are uninitialized. This
+    // constant should ideally not be a valid address of a function pointer.
+    // However, if by chance libc::dlsym does return UNINIT, there will not
+    // be undefined behavior. libc::dlsym will just be called each time ptr()
+    // is called. This would be inefficient, but correct.
+    // TODO: Replace with core::ptr::invalid_mut(1) when that is stable.
+    const UNINIT: *mut c_void = 1 as *mut c_void;
+
+    // Construct a binding to a C function with a given name. This function is
+    // unsafe because `name` _must_ be null terminated.
+    pub const unsafe fn new(name: &'static str) -> Self {
+        Self {
+            name,
+            addr: AtomicPtr::new(Self::UNINIT),
+        }
+    }
+
+    // Return the address of a function if present at runtime. Otherwise,
+    // return None. Multiple callers can call ptr() concurrently. It will
+    // always return _some_ value returned by libc::dlsym. However, the
+    // dlsym function may be called multiple times.
+    pub fn ptr(&self) -> Option<NonNull<c_void>> {
+        // Despite having only a single atomic variable (self.addr), we still
+        // cannot always use Ordering::Relaxed, as we need to make sure a
+        // successful call to dlsym() is "ordered before" any data read through
+        // the returned pointer (which occurs when the function is called).
+        // Our implementation mirrors that of the one in libstd, meaning that
+        // the use of non-Relaxed operations is probably unnecessary.
+        match self.addr.load(Ordering::Relaxed) {
+            Self::UNINIT => {
+                let symbol = self.name.as_ptr() as *const _;
+                let addr = unsafe { libc::dlsym(libc::RTLD_DEFAULT, symbol) };
+                // Synchronizes with the Acquire fence below
+                self.addr.store(addr, Ordering::Release);
+                NonNull::new(addr)
+            }
+            addr => {
+                let func = NonNull::new(addr)?;
+                fence(Ordering::Acquire);
+                Some(func)
+            }
+        }
+    }
+}
+
+// SAFETY: path must be null terminated, FD must be manually closed.
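+// The open(2) call below is retried on EINTR so that a stray signal during
+// startup cannot spuriously fail initialization.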
+pub unsafe fn open_readonly(path: &str) -> Result<libc::c_int, Error> {
+    debug_assert_eq!(path.as_bytes().last(), Some(&0));
+    loop {
+        let fd = libc::open(path.as_ptr() as *const _, libc::O_RDONLY | libc::O_CLOEXEC);
+        if fd >= 0 {
+            return Ok(fd);
+        }
+        let err = last_os_error();
+        // We should try again if open() was interrupted.
+        if err.raw_os_error() != Some(libc::EINTR) {
+            return Err(err);
+        }
+    }
+}
diff --git a/vendor/getrandom/src/vita.rs b/vendor/getrandom/src/vita.rs
new file mode 100644
index 0000000..4f19b9c
--- /dev/null
+++ b/vendor/getrandom/src/vita.rs
@@ -0,0 +1,21 @@
+// Copyright 2021 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for PS Vita
+use crate::{util_libc::last_os_error, Error};
+use core::mem::MaybeUninit;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    for chunk in dest.chunks_mut(256) {
+        let ret = unsafe { libc::getentropy(chunk.as_mut_ptr() as *mut libc::c_void, chunk.len()) };
+        if ret == -1 {
+            return Err(last_os_error());
+        }
+    }
+    Ok(())
+}
diff --git a/vendor/getrandom/src/vxworks.rs b/vendor/getrandom/src/vxworks.rs
new file mode 100644
index 0000000..9b2090f
--- /dev/null
+++ b/vendor/getrandom/src/vxworks.rs
@@ -0,0 +1,37 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for VxWorks
+use crate::{util_libc::last_os_error, Error};
+use core::{
+    mem::MaybeUninit,
+    sync::atomic::{AtomicBool, Ordering::Relaxed},
+};
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    static RNG_INIT: AtomicBool = AtomicBool::new(false);
+    while !RNG_INIT.load(Relaxed) {
+        let ret = unsafe { libc::randSecure() };
+        if ret < 0 {
+            return Err(Error::VXWORKS_RAND_SECURE);
+        } else if ret > 0 {
+            RNG_INIT.store(true, Relaxed);
+            break;
+        }
+        unsafe { libc::usleep(10) };
+    }
+
+    // Prevent overflow of i32
+    for chunk in dest.chunks_mut(i32::max_value() as usize) {
+        let ret = unsafe { libc::randABytes(chunk.as_mut_ptr() as *mut u8, chunk.len() as i32) };
+        if ret != 0 {
+            return Err(last_os_error());
+        }
+    }
+    Ok(())
+}
diff --git a/vendor/getrandom/src/wasi.rs b/vendor/getrandom/src/wasi.rs
new file mode 100644
index 0000000..9276ee7
--- /dev/null
+++ b/vendor/getrandom/src/wasi.rs
@@ -0,0 +1,25 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementation for WASI
+use crate::Error;
+use core::{
+    mem::MaybeUninit,
+    num::{NonZeroU16, NonZeroU32},
+};
+use wasi::random_get;
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    unsafe { random_get(dest.as_mut_ptr() as *mut u8, dest.len()) }.map_err(|e| {
+        // The WASI errno will always be non-zero, but we check just in case.
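+        // (Widening the u16 errno to NonZeroU32 keeps it within the range
+        // this crate reserves for OS error codes.)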
+        match NonZeroU16::new(e.raw()) {
+            Some(r) => Error::from(NonZeroU32::from(r)),
+            None => Error::ERRNO_NOT_POSITIVE,
+        }
+    })
+}
diff --git a/vendor/getrandom/src/windows.rs b/vendor/getrandom/src/windows.rs
new file mode 100644
index 0000000..92d7042
--- /dev/null
+++ b/vendor/getrandom/src/windows.rs
@@ -0,0 +1,66 @@
+// Copyright 2018 Developers of the Rand project.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use crate::Error;
+use core::{ffi::c_void, mem::MaybeUninit, num::NonZeroU32, ptr};
+
+const BCRYPT_USE_SYSTEM_PREFERRED_RNG: u32 = 0x00000002;
+
+#[link(name = "bcrypt")]
+extern "system" {
+    fn BCryptGenRandom(
+        hAlgorithm: *mut c_void,
+        pBuffer: *mut u8,
+        cbBuffer: u32,
+        dwFlags: u32,
+    ) -> u32;
+}
+
+// Forbidden when targeting UWP
+#[cfg(not(target_vendor = "uwp"))]
+#[link(name = "advapi32")]
+extern "system" {
+    #[link_name = "SystemFunction036"]
+    fn RtlGenRandom(RandomBuffer: *mut c_void, RandomBufferLength: u32) -> u8;
+}
+
+pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
+    // Prevent overflow of u32
+    for chunk in dest.chunks_mut(u32::max_value() as usize) {
+        // BCryptGenRandom was introduced in Windows Vista
+        let ret = unsafe {
+            BCryptGenRandom(
+                ptr::null_mut(),
+                chunk.as_mut_ptr() as *mut u8,
+                chunk.len() as u32,
+                BCRYPT_USE_SYSTEM_PREFERRED_RNG,
+            )
+        };
+        // NTSTATUS codes use the two highest bits for severity status.
+        if ret >> 30 == 0b11 {
+            // Failed. Try RtlGenRandom as a fallback.
+            #[cfg(not(target_vendor = "uwp"))]
+            {
+                let ret =
+                    unsafe { RtlGenRandom(chunk.as_mut_ptr() as *mut c_void, chunk.len() as u32) };
+                if ret != 0 {
+                    continue;
+                }
+            }
+            // We zeroize the highest bit, so the error code will reside
+            // inside the range designated for OS codes.
+            let code = ret ^ (1 << 31);
+            // SAFETY: the second highest bit is always equal to one,
+            // so it's impossible to get zero. Unfortunately the type
+            // system does not have a way to express this yet.
+            let code = unsafe { NonZeroU32::new_unchecked(code) };
+            return Err(Error::from(code));
+        }
+    }
+    Ok(())
+}
diff --git a/vendor/getrandom/tests/common/mod.rs b/vendor/getrandom/tests/common/mod.rs
new file mode 100644
index 0000000..666f7f5
--- /dev/null
+++ b/vendor/getrandom/tests/common/mod.rs
@@ -0,0 +1,100 @@
+use super::getrandom_impl;
+
+#[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+#[cfg(feature = "test-in-browser")]
+wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
+
+#[test]
+fn test_zero() {
+    // Test that APIs are happy with zero-length requests
+    getrandom_impl(&mut [0u8; 0]).unwrap();
+}
+
+// Return the number of bits in which s1 and s2 differ
+#[cfg(not(feature = "custom"))]
+fn num_diff_bits(s1: &[u8], s2: &[u8]) -> usize {
+    assert_eq!(s1.len(), s2.len());
+    s1.iter()
+        .zip(s2.iter())
+        .map(|(a, b)| (a ^ b).count_ones() as usize)
+        .sum()
+}
+
+// Tests the quality of calling getrandom on two large buffers
+#[test]
+#[cfg(not(feature = "custom"))]
+fn test_diff() {
+    let mut v1 = [0u8; 1000];
+    getrandom_impl(&mut v1).unwrap();
+
+    let mut v2 = [0u8; 1000];
+    getrandom_impl(&mut v2).unwrap();
+
+    // Between 3.5 and 4.5 bits per byte should differ. Probability of failure:
+    // ~ 2^(-94) = 2 * CDF[BinomialDistribution[8000, 0.5], 3500]
+    let d = num_diff_bits(&v1, &v2);
+    assert!(d > 3500);
+    assert!(d < 4500);
+}
+
+// Tests the quality of calling getrandom repeatedly on small buffers
+#[test]
+#[cfg(not(feature = "custom"))]
+fn test_small() {
+    // For each buffer size, get at least 256 bytes and check that between
+    // 3 and 5 bits per byte differ. Probability of failure:
+    // ~ 2^(-91) = 64 * 2 * CDF[BinomialDistribution[8*256, 0.5], 3*256]
+    for size in 1..=64 {
+        let mut num_bytes = 0;
+        let mut diff_bits = 0;
+        while num_bytes < 256 {
+            let mut s1 = vec![0u8; size];
+            getrandom_impl(&mut s1).unwrap();
+            let mut s2 = vec![0u8; size];
+            getrandom_impl(&mut s2).unwrap();
+
+            num_bytes += size;
+            diff_bits += num_diff_bits(&s1, &s2);
+        }
+        assert!(diff_bits > 3 * num_bytes);
+        assert!(diff_bits < 5 * num_bytes);
+    }
+}
+
+#[test]
+fn test_huge() {
+    let mut huge = [0u8; 100_000];
+    getrandom_impl(&mut huge).unwrap();
+}
+
+// On WASM, the thread API always fails/panics
+#[cfg(not(target_arch = "wasm32"))]
+#[test]
+fn test_multithreading() {
+    extern crate std;
+    use std::{sync::mpsc::channel, thread, vec};
+
+    let mut txs = vec![];
+    for _ in 0..20 {
+        let (tx, rx) = channel();
+        txs.push(tx);
+
+        thread::spawn(move || {
+            // wait until all the tasks are ready to go.
+            rx.recv().unwrap();
+            let mut v = [0u8; 1000];
+
+            for _ in 0..100 {
+                getrandom_impl(&mut v).unwrap();
+                thread::yield_now();
+            }
+        });
+    }
+
+    // start all the tasks
+    for tx in txs.iter() {
+        tx.send(()).unwrap();
+    }
+}
diff --git a/vendor/getrandom/tests/custom.rs b/vendor/getrandom/tests/custom.rs
new file mode 100644
index 0000000..b085094
--- /dev/null
+++ b/vendor/getrandom/tests/custom.rs
@@ -0,0 +1,54 @@
+// Test that a custom handler works on wasm32-unknown-unknown
+#![cfg(all(
+    target_arch = "wasm32",
+    target_os = "unknown",
+    feature = "custom",
+    not(feature = "js")
+))]
+
+use wasm_bindgen_test::wasm_bindgen_test as test;
+
+use core::num::NonZeroU32;
+use getrandom::{getrandom, register_custom_getrandom, Error};
+
+fn len7_err() -> Error {
+    NonZeroU32::new(Error::INTERNAL_START + 7).unwrap().into()
+}
+
+fn super_insecure_rng(buf: &mut [u8]) -> Result<(), Error> {
+    // `getrandom` guarantees it will not call any implementation if the output
+    // buffer is empty.
+    assert!(!buf.is_empty());
+    // Length 7 buffers return a custom error
+    if buf.len() == 7 {
+        return Err(len7_err());
+    }
+    // Otherwise, fill bytes based on input length
+    let mut start = buf.len() as u8;
+    for b in buf {
+        *b = start;
+        start = start.wrapping_mul(3);
+    }
+    Ok(())
+}
+
+register_custom_getrandom!(super_insecure_rng);
+
+use getrandom::getrandom as getrandom_impl;
+mod common;
+
+#[test]
+fn custom_rng_output() {
+    let mut buf = [0u8; 4];
+    assert_eq!(getrandom(&mut buf), Ok(()));
+    assert_eq!(buf, [4, 12, 36, 108]);
+
+    let mut buf = [0u8; 3];
+    assert_eq!(getrandom(&mut buf), Ok(()));
+    assert_eq!(buf, [3, 9, 27]);
+}
+
+#[test]
+fn rng_err_output() {
+    assert_eq!(getrandom(&mut [0; 7]), Err(len7_err()));
+}
diff --git a/vendor/getrandom/tests/normal.rs b/vendor/getrandom/tests/normal.rs
new file mode 100644
index 0000000..5fff13b
--- /dev/null
+++ b/vendor/getrandom/tests/normal.rs
@@ -0,0 +1,11 @@
+// Don't test on custom wasm32-unknown-unknown
+#![cfg(not(all(
+    target_arch = "wasm32",
+    target_os = "unknown",
+    feature = "custom",
+    not(feature = "js")
+)))]
+
+// Use the normal getrandom implementation on this architecture.
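+// (The shared tests in common/mod.rs import this alias as super::getrandom_impl.)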
+use getrandom::getrandom as getrandom_impl; +mod common; diff --git a/vendor/getrandom/tests/rdrand.rs b/vendor/getrandom/tests/rdrand.rs new file mode 100644 index 0000000..2567868 --- /dev/null +++ b/vendor/getrandom/tests/rdrand.rs @@ -0,0 +1,20 @@ +// We only test the RDRAND-based RNG source on supported architectures. +#![cfg(any(target_arch = "x86_64", target_arch = "x86"))] + +// rdrand.rs expects to be part of the getrandom main crate, so we need these +// additional imports to get rdrand.rs to compile. +use getrandom::Error; +#[macro_use] +extern crate cfg_if; +#[path = "../src/rdrand.rs"] +mod rdrand; +#[path = "../src/util.rs"] +mod util; + +// The rdrand implementation has the signature of getrandom_uninit(), but our +// tests expect getrandom_impl() to have the signature of getrandom(). +fn getrandom_impl(dest: &mut [u8]) -> Result<(), Error> { + rdrand::getrandom_inner(unsafe { util::slice_as_uninit_mut(dest) })?; + Ok(()) +} +mod common; diff --git a/vendor/ordered-multimap/.cargo-checksum.json b/vendor/ordered-multimap/.cargo-checksum.json new file mode 100644 index 0000000..e28e2c5 --- /dev/null +++ b/vendor/ordered-multimap/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"addf5766dd985eecd129abf122eee8fdbb0c490f365edb9254aa0f9195895d7c","CONTRIBUTING.md":"c67c1ce7c5f0d1aa5009db0ba42c6a32da2e2d5a6b517f38c0308d8ffa5ae083","Cargo.toml":"60ed14a557a877cbf8cee01f1e76541f094d5c5e875a4ff5d2b824b20c10b69a","LICENSE":"047c1d2f1c28c30ced89bd0740ff251d8f51512e81b142711f958a0551729ec4","README.md":"71553048ddfb5a2ba5817c4c8bea138b83bc75c953d289637110bde93e4f2125","codecov.yml":"550982ef37ab56e6c3e06351f359f2407855a54c27f7e9c7871b855aa34c9109","rustfmt.toml":"9d197f8ce3b24c6aa98627d614420d5291fde7c5442cf77a7f8718dc9375f361","src/lib.rs":"357fa15e37bb947d61acb8224fcaddc23087cf88ae96b730966898fc9cedfaed","src/list_ordered_multimap.rs":"dee8047febf734701197c7aa5220bdb6ff33a63d1f515263e982e565842c76b1","src/serde.rs":"aa0db90076cc20dffe26677e8fa00437dc3fc84b9cf38b469836f99a1b010b89"},"package":"4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e"} \ No newline at end of file diff --git a/vendor/ordered-multimap/CHANGELOG.md b/vendor/ordered-multimap/CHANGELOG.md new file mode 100644 index 0000000..1e7d092 --- /dev/null +++ b/vendor/ordered-multimap/CHANGELOG.md @@ -0,0 +1,110 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to +[Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +# 0.6.0 - 01-21-2023 + +### Added + + - Added support for `no_std` - @notgull + +### Changed + + - Updated `dlv-list` dependency to `0.5.0`. + - Updated `hashbrown` dependency to `0.13.2`. + +# 0.5.0 - 08-25-2022 + +### Changed + + - Loosened bounds required on some functions. + - Updated `dlv-list` dependency to `0.4.0`. + +### Fixed + + - `serde` implementation now correctly works as a multimap. + +# 0.4.3 + +### Changed + + - Updated `hashbrown` dependency to `0.12.0`. + +# 0.4.2 + +### Changed + + - Updated `dlv-list` dependency to `0.3.0`. This is not a breaking change as it's not user visible. + +# 0.4.1 + +### Changed + + - Updated `dlv-list` dependency to `0.2.4`. + - Updated `hashbrown` dependency to `0.11.0`. + +# 0.4.0 + +### Removed + + - Removed `drain_pairs` as it's unsafe. + +### Fixed + + - Fixed miri issues with `retain`. 
+ +# 0.3.1 + +### Added + + - Added crate feature `serde` for (de)serialization. + - Implemented `IntoIterator` of owned key-value pairs for `ListOrderedMultimap`. + +# 0.3.0 + +### Changed + + - Updated `hashbrown` dependency to `0.9.0`. + +# 0.2.4 + +### Changed + + - Updated `dlv-list` dependency to `0.2.2`. + - Updated `hashbrown` dependency to `0.7.0`. + +# 0.2.3 + +### Changed + + - Works on stable Rust. + - Updated `hashbrown` dependency to `0.6.0`. + +# 0.2.2 + +### Fixed + + - Fix crate as it was broken from std's migration to hashbrown. + +# 0.2.1 + +### Changed + + - Update dependency on `dlv-list` which will reduce memory size of `ListOrderedMultimap` by 48 bytes. + +# 0.2.0 + +### Added + + - Initial release. + +# 0.1.0 + +### Removed + + - Version was yanked due to critical design flaw. diff --git a/vendor/ordered-multimap/CONTRIBUTING.md b/vendor/ordered-multimap/CONTRIBUTING.md new file mode 100644 index 0000000..f0286c5 --- /dev/null +++ b/vendor/ordered-multimap/CONTRIBUTING.md @@ -0,0 +1,74 @@ +# Contribution guidelines + +First off, thank you for considering contributing to ordered_multimap. + +If your contribution is not straightforward, please first discuss the change you wish to make by creating a new issue +before making the change. + +## Reporting issues + +Before reporting an issue on the [issue tracker](https://github.com/sgodwincs/ordered_multimap/issues), please +check that it has not already been reported by searching for some related keywords. + +## Pull requests + +Try to do one pull request per change. + +### Updating the changelog + +Update the changes you have made in +[CHANGELOG](https://github.com/sgodwincs/ordered_multimap/blob/main/CHANGELOG.md) +file under the **Unreleased** section. + +Add the changes of your pull request to one of the following subsections, depending on the types of changes defined by +[Keep a changelog](https://keepachangelog.com/en/1.0.0/): + +- `Added` for new features. +- `Changed` for changes in existing functionality. +- `Deprecated` for soon-to-be removed features. +- `Removed` for now removed features. +- `Fixed` for any bug fixes. +- `Security` in case of vulnerabilities. + +If the required subsection does not exist yet under **Unreleased**, create it! + +## Developing + +### Set up + +This is no different than other Rust projects. + +```shell +git clone https://github.com/sgodwincs/ordered_multimap +cd ordered_multimap +cargo test +``` + +### Useful Commands +- Run Clippy: + + ```shell + cargo clippy --all-targets --all-features --workspace + ``` + +- Run all tests: + + ```shell + cargo test --all-features --workspace + ``` + +- Check to see if there are code formatting issues + + ```shell + cargo fmt --all -- --check + ``` + +- Format the code in the project + + ```shell + cargo fmt --all + ``` + +## Code of Conduct + +This project adheres to the Rust Code of Conduct, which can be found [here](https://www.rust-lang.org/conduct.html). diff --git a/vendor/ordered-multimap/Cargo.toml b/vendor/ordered-multimap/Cargo.toml new file mode 100644 index 0000000..cdc196c --- /dev/null +++ b/vendor/ordered-multimap/Cargo.toml @@ -0,0 +1,44 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. 
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+name = "ordered-multimap"
+version = "0.6.0"
+authors = ["Scott Godwin "]
+description = "Insertion ordered multimap"
+readme = "README.md"
+categories = ["data-structures"]
+license = "MIT"
+repository = "https://github.com/sgodwincs/ordered-multimap-rs"
+
+[dependencies.dlv-list]
+version = "0.5"
+default-features = false
+
+[dependencies.hashbrown]
+version = "0.13.2"
+default-features = false
+
+[dependencies.serde]
+version = "1"
+optional = true
+default-features = false
+
+[dev-dependencies.coverage-helper]
+version = "0.1.0"
+
+[dev-dependencies.serde_test]
+version = "1.0.144"
+
+[features]
+default = ["std"]
+std = ["dlv-list/std"]
diff --git a/vendor/ordered-multimap/LICENSE b/vendor/ordered-multimap/LICENSE
new file mode 100644
index 0000000..02eba8f
--- /dev/null
+++ b/vendor/ordered-multimap/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 sgodwincs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/ordered-multimap/README.md b/vendor/ordered-multimap/README.md
new file mode 100644
index 0000000..22e6451
--- /dev/null
+++ b/vendor/ordered-multimap/README.md
@@ -0,0 +1,38 @@
+# ordered-multimap-rs
+
+[![Crates.io](https://img.shields.io/crates/v/ordered-multimap.svg)](https://crates.io/crates/ordered-multimap)
+[![Docs.rs](https://docs.rs/ordered-multimap/badge.svg)](https://docs.rs/ordered-multimap)
+[![CI](https://github.com/sgodwincs/ordered-multimap-rs/workflows/CI/badge.svg)](https://github.com/sgodwincs/ordered-multimap-rs/actions)
+
+Currently, this crate contains a single type `ListOrderedMultimap`. This is a multimap, meaning that
+multiple values can be associated with a given key, but it also maintains insertion order across all
+keys and values. A short usage sketch is shown below.
+
+[Documentation](https://docs.rs/ordered-multimap/)
+
+## Performance
+
+Basic benchmarks show that the performance of this crate is on par with that of the
+[multimap](https://crates.io/crates/multimap) crate, which does not maintain insertion order.
+
+## Features
+
+ - `std` (default) enables usage of the standard library. Disabling this feature allows this crate to be used in `no_std` environments.
+ - `serde` for (de)serialization.
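+## Example
+
+A minimal sketch of the ordering behavior described above, using the default `std` feature. The calls shown here
+(`insert`, `append`, `get`, `get_all`, `values`) follow the API documented in this crate; the `main` wrapper is only
+for illustration:
+
+```rust
+use ordered_multimap::ListOrderedMultimap;
+
+fn main() {
+  let mut map = ListOrderedMultimap::new();
+  map.insert("key1", 1);
+  map.append("key1", 2); // a second value under the same key
+  map.insert("key2", 3);
+
+  // `get` returns the first value for a key; `get_all` yields all of them.
+  assert_eq!(map.get(&"key1"), Some(&1));
+  assert_eq!(map.get_all(&"key1").collect::<Vec<_>>(), vec![&1, &2]);
+
+  // Iteration preserves insertion order across all keys and values.
+  assert_eq!(map.values().collect::<Vec<_>>(), vec![&1, &2, &3]);
+}
+```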
+ +## TODO + +It is planned that a corresponding `SetOrderedMultimap` will also be included in this crate which +will provide the same insertion order guarantees, but the set of values associated to a given key +will be an actual set instead of a list. + +## License + +Licensed under MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT). + +## Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you +shall be licensed as above, without any additional terms or conditions. + +See [CONTRIBUTING.md](CONTRIBUTING.md). diff --git a/vendor/ordered-multimap/codecov.yml b/vendor/ordered-multimap/codecov.yml new file mode 100644 index 0000000..d898005 --- /dev/null +++ b/vendor/ordered-multimap/codecov.yml @@ -0,0 +1,5 @@ +coverage: + status: + project: + default: + target: 95% diff --git a/vendor/ordered-multimap/rustfmt.toml b/vendor/ordered-multimap/rustfmt.toml new file mode 100644 index 0000000..723f864 --- /dev/null +++ b/vendor/ordered-multimap/rustfmt.toml @@ -0,0 +1,2 @@ +imports_granularity = "Crate" +tab_spaces = 2 diff --git a/vendor/ordered-multimap/src/lib.rs b/vendor/ordered-multimap/src/lib.rs new file mode 100644 index 0000000..06244a9 --- /dev/null +++ b/vendor/ordered-multimap/src/lib.rs @@ -0,0 +1,16 @@ +//! This crate provides a type [`ListOrderedMultimap`] which is a multimap that maintains insertion order across all +//! keys and values. +//! +//! See the type documentation for more information. + +#![cfg_attr(coverage_nightly, feature(no_coverage))] +#![cfg_attr(not(feature = "std"), no_std)] + +extern crate alloc; + +pub mod list_ordered_multimap; + +pub use self::list_ordered_multimap::ListOrderedMultimap; + +#[cfg(feature = "serde")] +mod serde; diff --git a/vendor/ordered-multimap/src/list_ordered_multimap.rs b/vendor/ordered-multimap/src/list_ordered_multimap.rs new file mode 100644 index 0000000..08235a6 --- /dev/null +++ b/vendor/ordered-multimap/src/list_ordered_multimap.rs @@ -0,0 +1,5383 @@ +//! Provides types related to the usage of [`ListOrderedMultimap`]. + +#![allow(unsafe_code)] + +use alloc::vec; +use core::{ + borrow::Borrow, + fmt::{self, Debug, Formatter}, + hash::{BuildHasher, Hash, Hasher}, + iter::{FromIterator, FusedIterator}, + marker::PhantomData, +}; + +use dlv_list::{ + Index, IntoIter as VecListIntoIter, Iter as VecListIter, IterMut as VecListIterMut, VecList, +}; +use hashbrown::{ + hash_map::{RawEntryMut, RawOccupiedEntryMut}, + HashMap, +}; + +/// A random state to use for the hashmap in the multimap. +#[cfg(feature = "std")] +pub type RandomState = std::collections::hash_map::RandomState; + +/// A random state to use for the hashmap in the multimap. +#[cfg(not(feature = "std"))] +#[derive(Debug)] +pub struct RandomState(core::convert::Infallible); + +#[cfg(not(feature = "std"))] +impl RandomState { + /// Creates a new random state. + #[cfg_attr(mutants, mutants::skip)] + #[must_use] + pub fn new() -> RandomState { + panic!("RandomState is not available without std") + } +} + +#[cfg(not(feature = "std"))] +impl Default for RandomState { + #[cfg_attr(mutants, mutants::skip)] + fn default() -> RandomState { + RandomState::new() + } +} + +#[cfg(not(feature = "std"))] +impl BuildHasher for RandomState { + type Hasher = DummyHasher; + + #[cfg_attr(mutants, mutants::skip)] + fn build_hasher(&self) -> Self::Hasher { + match self.0 {} + } +} + +#[derive(Clone)] +/// A multimap that associates with each key a list of values. 
+///
+/// # Ordering
+///
+/// The primary guarantee this type gives is that regardless of what you do to the multimap, you are always able to
+/// iterate through all keys and values in the order they were inserted. Values can be iterated by their insertion order
+/// either for a specific key or for the entire map.
+///
+/// # Allocations
+///
+/// Allocations may be performed on any key-value insertion.
+pub struct ListOrderedMultimap<Key, Value, State = RandomState> {
+  /// The hasher builder that constructs new hashers for hashing keys. We have to keep this separate from the hashmap
+  /// itself as we need to be able to access it when the hashmap keys are reallocated due to changes. We cannot use the
+  /// hash of the actual keys in the map as those hashes are not representative.
+  pub(crate) build_hasher: State,
+
+  /// The list of the keys in the multimap. This is ordered by time of insertion.
+  pub(crate) keys: VecList<Key>,
+
+  /// The map from indices of keys to the indices of their values in the value list. The list of the indices is ordered
+  /// by time of insertion. We never use the hasher of the hashmap explicitly here; we instead use
+  /// [`ListOrderedMultimap::build_hasher`].
+  pub(crate) map: HashMap<Index<Key>, MapEntry<Key, Value>, DummyState>,
+
+  /// The list of the values in the multimap. This is ordered by time of insertion.
+  pub(crate) values: VecList<ValueEntry<Key, Value>>,
+}
+
+#[cfg(feature = "std")]
+impl<Key, Value> ListOrderedMultimap<Key, Value, RandomState> {
+  /// Creates a new multimap with no initial capacity.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key1", "value1");
+  /// assert_eq!(map.get(&"key1"), Some(&"value1"));
+  /// ```
+  #[must_use]
+  pub fn new() -> ListOrderedMultimap<Key, Value, RandomState> {
+    ListOrderedMultimap {
+      build_hasher: RandomState::new(),
+      keys: VecList::new(),
+      map: HashMap::with_hasher(DummyState),
+      values: VecList::new(),
+    }
+  }
+
+  /// Creates a new multimap with the specified capacities.
+  ///
+  /// The multimap will be able to hold at least `key_capacity` keys and `value_capacity` values without reallocating.
+  /// A capacity of 0 will result in no allocation for the respective container.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new();
+  /// assert_eq!(map.keys_capacity(), 0);
+  /// assert_eq!(map.values_capacity(), 0);
+  ///
+  /// let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::with_capacity(5, 10);
+  /// assert_eq!(map.keys_capacity(), 5);
+  /// assert_eq!(map.values_capacity(), 10);
+  /// ```
+  #[must_use]
+  pub fn with_capacity(
+    key_capacity: usize,
+    value_capacity: usize,
+  ) -> ListOrderedMultimap<Key, Value, RandomState> {
+    ListOrderedMultimap {
+      build_hasher: RandomState::new(),
+      keys: VecList::with_capacity(key_capacity),
+      map: HashMap::with_capacity_and_hasher(key_capacity, DummyState),
+      values: VecList::with_capacity(value_capacity),
+    }
+  }
+}
+
+impl<Key, Value, State> ListOrderedMultimap<Key, Value, State>
+where
+  State: BuildHasher,
+{
+  /// Creates a new multimap with the specified capacities and the given hash builder to hash keys.
+  ///
+  /// The multimap will be able to hold at least `key_capacity` keys and `value_capacity` values without reallocating. A
+  /// capacity of 0 will result in no allocation for the respective container.
+  ///
+  /// The `state` is normally randomly generated and is designed to allow multimaps to be resistant to attacks that
+  /// cause many collisions and very poor performance. Setting it manually using this function can expose a DoS attack
+  /// vector.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use std::collections::hash_map::RandomState;
+  ///
+  /// let state = RandomState::new();
+  /// let mut map = ListOrderedMultimap::with_capacity_and_hasher(10, 10, state);
+  /// map.insert("key", "value");
+  /// assert_eq!(map.keys_capacity(), 10);
+  /// assert_eq!(map.values_capacity(), 10);
+  /// ```
+  #[must_use]
+  pub fn with_capacity_and_hasher(
+    key_capacity: usize,
+    value_capacity: usize,
+    state: State,
+  ) -> ListOrderedMultimap<Key, Value, State> {
+    ListOrderedMultimap {
+      build_hasher: state,
+      keys: VecList::with_capacity(key_capacity),
+      map: HashMap::with_capacity_and_hasher(key_capacity, DummyState),
+      values: VecList::with_capacity(value_capacity),
+    }
+  }
+
+  /// Creates a new multimap with no capacity which will use the given hash builder to hash keys.
+  ///
+  /// The `state` is normally randomly generated and is designed to allow multimaps to be resistant to attacks that
+  /// cause many collisions and very poor performance. Setting it manually using this function can expose a DoS attack
+  /// vector.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use std::collections::hash_map::RandomState;
+  ///
+  /// let state = RandomState::new();
+  /// let mut map = ListOrderedMultimap::with_hasher(state);
+  /// map.insert("key", "value");
+  /// ```
+  #[must_use]
+  pub fn with_hasher(state: State) -> ListOrderedMultimap<Key, Value, State> {
+    ListOrderedMultimap {
+      build_hasher: state,
+      keys: VecList::new(),
+      map: HashMap::with_hasher(DummyState),
+      values: VecList::new(),
+    }
+  }
+}
+
+impl<Key, Value, State> ListOrderedMultimap<Key, Value, State> {
+  /// Returns an immutable reference to the last key-value pair in the multimap.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert_eq!(map.back(), None);
+  ///
+  /// map.insert("key", "value");
+  /// assert_eq!(map.back(), Some((&"key", &"value")));
+  /// ```
+  #[must_use]
+  pub fn back(&self) -> Option<(&Key, &Value)> {
+    self.iter().next_back()
+  }
+
+  /// Returns a reference to the last key-value pair in the multimap, where the reference to the value is mutable.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert_eq!(map.back_mut(), None);
+  ///
+  /// map.insert("key", "value");
+  /// assert_eq!(map.back_mut(), Some((&"key", &mut "value")));
+  /// ```
+  #[must_use]
+  pub fn back_mut(&mut self) -> Option<(&Key, &mut Value)> {
+    self.iter_mut().next_back()
+  }
+
+  /// Removes all keys and values from the multimap.
+  ///
+  /// Complexity: O(|K| + |V|) where |K| is the number of keys and |V| is the number of values.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key", "value");
+  /// assert_eq!(map.keys_len(), 1);
+  /// assert_eq!(map.values_len(), 1);
+  ///
+  /// map.clear();
+  /// assert_eq!(map.keys_len(), 0);
+  /// assert_eq!(map.values_len(), 0);
+  /// ```
+  pub fn clear(&mut self) {
+    self.keys.clear();
+    self.map.clear();
+    self.values.clear();
+  }
+
+  /// Returns an immutable reference to the first key-value pair in the multimap.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert_eq!(map.front(), None);
+  ///
+  /// map.insert("key", "value");
+  /// assert_eq!(map.front(), Some((&"key", &"value")));
+  /// ```
+  #[must_use]
+  pub fn front(&self) -> Option<(&Key, &Value)> {
+    self.iter().next()
+  }
+
+  /// Returns a reference to the first key-value pair in the multimap, where the reference to the value is mutable.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert_eq!(map.front_mut(), None);
+  ///
+  /// map.insert("key", "value");
+  /// assert_eq!(map.front_mut(), Some((&"key", &mut "value")));
+  /// ```
+  #[must_use]
+  pub fn front_mut(&mut self) -> Option<(&Key, &mut Value)> {
+    self.iter_mut().next()
+  }
+
+  /// Returns a reference to the multimap's [`BuildHasher`].
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new();
+  /// let hasher = map.hasher();
+  /// ```
+  #[must_use]
+  pub fn hasher(&self) -> &State {
+    &self.build_hasher
+  }
+
+  /// Returns whether the multimap is empty.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert!(map.is_empty());
+  ///
+  /// map.insert("key1", "value");
+  /// assert!(!map.is_empty());
+  ///
+  /// map.remove(&"key1");
+  /// assert!(map.is_empty());
+  /// ```
+  #[must_use]
+  pub fn is_empty(&self) -> bool {
+    self.keys.is_empty()
+  }
+
+  /// Returns an iterator that yields immutable references to all key-value pairs in the multimap by insertion order.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key1", "value1");
+  /// map.insert("key2", "value1");
+  /// map.append("key1", "value2");
+  /// map.append("key2", "value2");
+  ///
+  /// let mut iter = map.iter();
+  /// assert_eq!(iter.size_hint(), (4, Some(4)));
+  /// assert_eq!(iter.next(), Some((&"key1", &"value1")));
+  /// assert_eq!(iter.next(), Some((&"key2", &"value1")));
+  /// assert_eq!(iter.next(), Some((&"key1", &"value2")));
+  /// assert_eq!(iter.next(), Some((&"key2", &"value2")));
+  /// assert_eq!(iter.next(), None);
+  /// ```
+  #[must_use]
+  pub fn iter(&self) -> Iter<'_, Key, Value> {
+    Iter {
+      keys: &self.keys,
+      iter: self.values.iter(),
+    }
+  }
+
+  /// Returns an iterator that yields mutable references to all key-value pairs in the multimap by insertion order.
+  ///
+  /// Only the values are mutable, the keys are immutable.
+ /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key1", "value1"); + /// map.insert("key2", "value1"); + /// map.append(&"key1", "value2"); + /// map.append(&"key2", "value2"); + /// + /// let mut iter = map.iter_mut(); + /// assert_eq!(iter.size_hint(), (4, Some(4))); + /// + /// let first = iter.next().unwrap(); + /// assert_eq!(first, (&"key1", &mut "value1")); + /// *first.1 = "value3"; + /// + /// assert_eq!(iter.next(), Some((&"key2", &mut "value1"))); + /// assert_eq!(iter.next(), Some((&"key1", &mut "value2"))); + /// assert_eq!(iter.next(), Some((&"key2", &mut "value2"))); + /// assert_eq!(iter.next(), None); + /// + /// assert_eq!(map.get(&"key1"), Some(&"value3")); + /// ``` + #[must_use] + pub fn iter_mut(&mut self) -> IterMut<'_, Key, Value> { + IterMut { + keys: &self.keys, + iter: self.values.iter_mut(), + } + } + + /// Returns an iterator that yields immutable references to all keys in the multimap by insertion order. + /// + /// Insertion order of keys is determined by the order in which a given key is first inserted into the multimap with a + /// value. Any subsequent insertions with that key without first removing it will not affect its ordering. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key1", "value"); + /// map.insert("key2", "value"); + /// map.insert("key3", "value"); + /// + /// let mut keys = map.keys(); + /// assert_eq!(keys.next(), Some(&"key1")); + /// assert_eq!(keys.next(), Some(&"key2")); + /// assert_eq!(keys.next(), Some(&"key3")); + /// assert_eq!(keys.next(), None); + /// ``` + #[must_use] + pub fn keys(&self) -> Keys<'_, Key> { + Keys(self.keys.iter()) + } + + /// Returns the number of keys the multimap can hold without reallocating. + /// + /// This number is a lower bound, and the multimap may be able to hold more. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// assert_eq!(map.keys_capacity(), 0); + /// + /// map.insert("key", "value"); + /// assert!(map.keys_capacity() > 0); + /// ``` + #[must_use] + pub fn keys_capacity(&self) -> usize { + self.keys.capacity() + } + + /// Returns the number of keys in the multimap. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// assert_eq!(map.keys_len(), 0); + /// + /// map.insert("key1", "value"); + /// map.insert("key2", "value"); + /// map.insert("key3", "value"); + /// assert_eq!(map.keys_len(), 3); + /// ``` + #[must_use] + pub fn keys_len(&self) -> usize { + self.keys.len() + } + + /// Returns an iterator that yields immutable references to keys and all associated values with those keys as separate + /// iterators. The order of yielded pairs will be the order in which the keys were first inserted into the multimap. 
+ /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// map.insert("key", "value1"); + /// map.append("key", "value2"); + /// + /// let mut iter = map.pairs(); + /// + /// let (key, mut values) = iter.next().unwrap(); + /// assert_eq!(key, &"key"); + /// assert_eq!(values.next(), Some(&"value1")); + /// assert_eq!(values.next(), Some(&"value2")); + /// assert_eq!(values.next(), None); + /// ``` + #[must_use] + pub fn pairs(&self) -> KeyValues<'_, Key, Value, State> { + KeyValues { + build_hasher: &self.build_hasher, + keys: &self.keys, + iter: self.keys.iter(), + map: &self.map, + values: &self.values, + } + } + + /// Returns an iterator that yields immutable references to keys and mutable references to all associated values with + /// those keys as separate iterators. The order of yielded pairs will be the order in which the keys were first + /// inserted into the multimap. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// map.insert("key", "value1"); + /// map.append("key", "value2"); + /// + /// let mut iter = map.pairs_mut(); + /// + /// let (key, mut values) = iter.next().unwrap(); + /// assert_eq!(key, &"key"); + /// assert_eq!(values.next(), Some(&mut "value1")); + /// assert_eq!(values.next(), Some(&mut "value2")); + /// assert_eq!(values.next(), None); + /// ``` + #[must_use] + pub fn pairs_mut(&mut self) -> KeyValuesMut<'_, Key, Value, State> { + KeyValuesMut { + build_hasher: &self.build_hasher, + keys: &self.keys, + iter: self.keys.iter(), + map: &self.map, + values: &mut self.values, + } + } + + /// Reserves additional capacity such that more values can be stored in the multimap. + /// + /// If the existing capacity minus the current length is enough to satisfy the additional capacity, the capacity will + /// remain unchanged. + /// + /// If the capacity is increased, the capacity may be increased by more than what was requested. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::with_capacity(1, 1); + /// + /// map.insert("key", "value"); + /// assert_eq!(map.values_capacity(), 1); + /// + /// map.reserve_values(10); + /// assert!(map.values_capacity() >= 11); + /// ``` + pub fn reserve_values(&mut self, additional_capacity: usize) { + self.values.reserve(additional_capacity); + } + + /// Returns an iterator that yields immutable references to all values in the multimap by insertion order. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key1", "value1"); + /// map.insert("key2", "value1"); + /// map.append(&"key1", "value2"); + /// map.append(&"key2", "value2"); + /// + /// let mut iter = map.values(); + /// assert_eq!(iter.size_hint(), (4, Some(4))); + /// assert_eq!(iter.next(), Some(&"value1")); + /// assert_eq!(iter.next(), Some(&"value1")); + /// assert_eq!(iter.next(), Some(&"value2")); + /// assert_eq!(iter.next(), Some(&"value2")); + /// assert_eq!(iter.next(), None); + /// ``` + #[must_use] + pub fn values(&self) -> Values<'_, Key, Value> { + Values(self.values.iter()) + } + + /// Returns an iterator that yields mutable references to all values in the multimap by insertion order. 
+ /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key1", "value1"); + /// map.insert("key2", "value1"); + /// map.append(&"key1", "value2"); + /// map.append(&"key2", "value2"); + /// + /// let mut iter = map.values_mut(); + /// assert_eq!(iter.size_hint(), (4, Some(4))); + /// + /// let first = iter.next().unwrap(); + /// assert_eq!(first, &mut "value1"); + /// *first = "value3"; + /// + /// assert_eq!(iter.next(), Some(&mut "value1")); + /// assert_eq!(iter.next(), Some(&mut "value2")); + /// assert_eq!(iter.next(), Some(&mut "value2")); + /// assert_eq!(iter.next(), None); + /// + /// assert_eq!(map.get(&"key1"), Some(&"value3")); + /// ``` + #[must_use] + pub fn values_mut(&mut self) -> ValuesMut<'_, Key, Value> { + ValuesMut(self.values.iter_mut()) + } + + /// Returns the number of values the multimap can hold without reallocating. + /// + /// This number is a lower bound, and the multimap may be able to hold more. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// assert_eq!(map.values_capacity(), 0); + /// + /// map.insert("key", "value"); + /// assert!(map.values_capacity() > 0); + /// ``` + #[must_use] + pub fn values_capacity(&self) -> usize { + self.values.capacity() + } + + /// Returns the total number of values in the multimap across all keys. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// assert_eq!(map.values_len(), 0); + /// + /// map.insert("key1", "value1"); + /// assert_eq!(map.values_len(), 1); + /// + /// map.append("key1", "value2"); + /// assert_eq!(map.values_len(), 2); + /// ``` + #[must_use] + pub fn values_len(&self) -> usize { + self.values.len() + } +} + +impl ListOrderedMultimap +where + Key: Eq + Hash, + State: BuildHasher, +{ + /// Appends a value to the list of values associated with the given key. + /// + /// If the key is not already in the multimap, this will be identical to an insert and the return value will be + /// `false`. Otherwise, `true` will be returned. 
+  ///
+  /// Complexity: amortized O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// let already_exists = map.append("key", "value");
+  /// assert!(!already_exists);
+  /// assert_eq!(map.values_len(), 1);
+  /// assert_eq!(map.get(&"key"), Some(&"value"));
+  ///
+  /// let already_exists = map.append("key", "value2");
+  /// assert!(already_exists);
+  /// assert_eq!(map.values_len(), 2);
+  /// ```
+  pub fn append(&mut self, key: Key, value: Value) -> bool {
+    let hash = hash_key(&self.build_hasher, &key);
+    let entry = raw_entry_mut(&self.keys, &mut self.map, hash, &key);
+    let build_hasher = &self.build_hasher;
+
+    match entry {
+      RawEntryMut::Occupied(mut entry) => {
+        let key_index = entry.key();
+        let mut value_entry = ValueEntry::new(*key_index, value);
+        let map_entry = entry.get_mut();
+        value_entry.previous_index = Some(map_entry.tail_index);
+        let index = self.values.push_back(value_entry);
+        self
+          .values
+          .get_mut(map_entry.tail_index)
+          .unwrap()
+          .next_index = Some(index);
+        map_entry.append(index);
+        true
+      }
+      RawEntryMut::Vacant(entry) => {
+        let key_index = self.keys.push_back(key);
+        let value_entry = ValueEntry::new(key_index, value);
+        let index = self.values.push_back(value_entry);
+        let keys = &self.keys;
+        let _ = entry.insert_with_hasher(hash, key_index, MapEntry::new(index), |&key_index| {
+          let key = keys.get(key_index).unwrap();
+          hash_key(build_hasher, key)
+        });
+        false
+      }
+    }
+  }
+
+  /// Returns whether the given key is in the multimap.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert!(!map.contains_key(&"key"));
+  /// map.insert("key", "value");
+  /// assert!(map.contains_key(&"key"));
+  /// ```
+  #[must_use]
+  pub fn contains_key<KeyQuery>(&self, key: &KeyQuery) -> bool
+  where
+    Key: Borrow<KeyQuery>,
+    KeyQuery: ?Sized + Eq + Hash,
+  {
+    let hash = hash_key(&self.build_hasher, &key);
+    raw_entry(&self.keys, &self.map, hash, key).is_some()
+  }
+
+  /// Returns a view into the entry in the multimap for the given key, which is either vacant or occupied.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// let value = map.entry("key").or_insert("value");
+  /// assert_eq!(value, &"value");
+  /// assert_eq!(map.get(&"key"), Some(&"value"));
+  /// ```
+  #[must_use]
+  pub fn entry(&mut self, key: Key) -> Entry<'_, Key, Value, State> {
+    let hash = hash_key(&self.build_hasher, &key);
+
+    // TODO: This ugliness arises from borrow checking issues which seem to happen when the vacant entry is created in
+    // the match block further below for `Vacant` even though it should be perfectly safe. Is there a better way to do
+    // this?
+    if !self.contains_key(&key) {
+      Entry::Vacant(VacantEntry {
+        build_hasher: &self.build_hasher,
+        hash,
+        key,
+        keys: &mut self.keys,
+        map: &mut self.map,
+        values: &mut self.values,
+      })
+    } else {
+      match raw_entry_mut(&self.keys, &mut self.map, hash, &key) {
+        RawEntryMut::Occupied(entry) => Entry::Occupied(OccupiedEntry {
+          entry,
+          keys: &mut self.keys,
+          values: &mut self.values,
+        }),
+        _ => panic!("expected occupied entry"),
+      }
+    }
+  }
+
+  /// Returns the number of values associated with a key.
+ /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// assert_eq!(map.entry_len(&"key"), 0); + /// + /// map.insert("key", "value1"); + /// assert_eq!(map.entry_len(&"key"), 1); + /// + /// map.append(&"key", "value2"); + /// assert_eq!(map.entry_len(&"key"), 2); + /// ``` + #[must_use] + pub fn entry_len(&self, key: &KeyQuery) -> usize + where + Key: Borrow, + KeyQuery: ?Sized + Eq + Hash, + { + let hash = hash_key(&self.build_hasher, &key); + + match raw_entry(&self.keys, &self.map, hash, key) { + Some((_, map_entry)) => map_entry.length, + None => 0, + } + } + + /// Returns an immutable reference to the first value, by insertion order, associated with the given key, or `None` if + /// the key is not in the multimap. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + /// assert_eq!(map.get(&"key"), None); + /// + /// ``` + #[must_use] + pub fn get(&self, key: &KeyQuery) -> Option<&Value> + where + Key: Borrow, + KeyQuery: ?Sized + Eq + Hash, + { + let hash = hash_key(&self.build_hasher, &key); + let (_, map_entry) = raw_entry(&self.keys, &self.map, hash, key)?; + self + .values + .get(map_entry.head_index) + .map(|entry| &entry.value) + } + + /// Returns an iterator that yields immutable references to all values associated with the given key by insertion + /// order. + /// + /// If the key is not in the multimap, the iterator will yield no values. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value"); + /// map.append("key", "value2"); + /// + /// let mut iter = map.get_all(&"key"); + /// assert_eq!(iter.next(), Some(&"value")); + /// assert_eq!(iter.next(), Some(&"value2")); + /// assert_eq!(iter.next(), None); + /// ``` + #[must_use] + pub fn get_all(&self, key: &KeyQuery) -> EntryValues<'_, Key, Value> + where + Key: Borrow, + KeyQuery: ?Sized + Eq + Hash, + { + let hash = hash_key(&self.build_hasher, &key); + + match raw_entry(&self.keys, &self.map, hash, key) { + Some((_, map_entry)) => EntryValues::from_map_entry(&self.values, map_entry), + None => EntryValues::empty(&self.values), + } + } + + /// Returns an iterator that yields mutable references to all values associated with the given key by insertion order. + /// + /// If the key is not in the multimap, the iterator will yield no values. 
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key", "value1");
+  /// map.append("key", "value2");
+  ///
+  /// let mut iter = map.get_all_mut(&"key");
+  ///
+  /// let first = iter.next().unwrap();
+  /// assert_eq!(first, &mut "value1");
+  /// *first = "value3";
+  ///
+  /// assert_eq!(iter.next(), Some(&mut "value2"));
+  /// assert_eq!(iter.next(), None);
+  ///
+  /// assert_eq!(map.get(&"key"), Some(&"value3"));
+  /// ```
+  #[must_use]
+  pub fn get_all_mut<KeyQuery>(&mut self, key: &KeyQuery) -> EntryValuesMut<'_, Key, Value>
+  where
+    Key: Borrow<KeyQuery>,
+    KeyQuery: ?Sized + Eq + Hash,
+  {
+    let hash = hash_key(&self.build_hasher, &key);
+
+    match raw_entry(&self.keys, &self.map, hash, key) {
+      Some((_, map_entry)) => EntryValuesMut::from_map_entry(&mut self.values, map_entry),
+      None => EntryValuesMut::empty(&mut self.values),
+    }
+  }
+
+  /// Returns a mutable reference to the first value, by insertion order, associated with the given key, or `None` if
+  /// the key is not in the multimap.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert_eq!(map.get(&"key"), None);
+  ///
+  /// map.insert("key", "value");
+  /// assert_eq!(map.get(&"key"), Some(&"value"));
+  ///
+  /// let mut value = map.get_mut(&"key").unwrap();
+  /// *value = "value2";
+  ///
+  /// assert_eq!(map.get(&"key"), Some(&"value2"));
+  /// ```
+  #[must_use]
+  pub fn get_mut<KeyQuery>(&mut self, key: &KeyQuery) -> Option<&mut Value>
+  where
+    Key: Borrow<KeyQuery>,
+    KeyQuery: ?Sized + Eq + Hash,
+  {
+    let hash = hash_key(&self.build_hasher, &key);
+    let (_, map_entry) = raw_entry(&self.keys, &self.map, hash, key)?;
+    self
+      .values
+      .get_mut(map_entry.head_index)
+      .map(|entry| &mut entry.value)
+  }
+
+  /// Inserts the key-value pair into the multimap and returns the first value, by insertion order, that was already
+  /// associated with the key.
+  ///
+  /// If the key is not already in the multimap, `None` will be returned. If the key is already in the multimap, the
+  /// insertion ordering of the keys will remain unchanged.
+  ///
+  /// Complexity: O(1) amortized
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// assert!(map.is_empty());
+  ///
+  /// let old_value = map.insert("key", "value");
+  /// assert!(old_value.is_none());
+  /// assert_eq!(map.values_len(), 1);
+  /// assert_eq!(map.get(&"key"), Some(&"value"));
+  ///
+  /// let old_value = map.insert("key", "value2");
+  /// assert_eq!(old_value, Some("value"));
+  /// assert_eq!(map.values_len(), 1);
+  /// assert_eq!(map.get(&"key"), Some(&"value2"));
+  /// ```
+  pub fn insert(&mut self, key: Key, value: Value) -> Option<Value> {
+    self.insert_all(key, value).next()
+  }
+
+  /// Inserts the key-value pair into the multimap and returns an iterator that yields all values previously associated
+  /// with the key by insertion order.
+  ///
+  /// If the key is not already in the multimap, the iterator will yield no values. If the key is already in the
+  /// multimap, the insertion ordering of the keys will remain unchanged.
+ /// + /// Complexity: O(1) amortized + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// assert!(map.is_empty()); + /// + /// { + /// let mut old_values = map.insert_all("key", "value"); + /// assert_eq!(old_values.next(), None); + /// } + /// + /// assert_eq!(map.values_len(), 1); + /// assert_eq!(map.get(&"key"), Some(&"value")); + /// + /// map.append("key", "value2"); + /// + /// { + /// let mut old_values = map.insert_all("key", "value3"); + /// assert_eq!(old_values.next(), Some("value")); + /// assert_eq!(old_values.next(), Some("value2")); + /// assert_eq!(old_values.next(), None); + /// } + /// + /// assert_eq!(map.values_len(), 1); + /// assert_eq!(map.get(&"key"), Some(&"value3")); + /// ``` + pub fn insert_all(&mut self, key: Key, value: Value) -> EntryValuesDrain<'_, Key, Value> { + let hash = hash_key(&self.build_hasher, &key); + let entry = raw_entry_mut(&self.keys, &mut self.map, hash, &key); + let build_hasher = &self.build_hasher; + + match entry { + RawEntryMut::Occupied(mut entry) => { + let key_index = entry.key(); + let value_entry = ValueEntry::new(*key_index, value); + let index = self.values.push_back(value_entry); + let map_entry = entry.get_mut(); + let iter = EntryValuesDrain::from_map_entry(&mut self.values, map_entry); + map_entry.reset(index); + iter + } + RawEntryMut::Vacant(entry) => { + let key_index = self.keys.push_back(key); + let value_entry = ValueEntry::new(key_index, value); + let index = self.values.push_back(value_entry); + let keys = &self.keys; + let _ = entry.insert_with_hasher(hash, key_index, MapEntry::new(index), |&key_index| { + let key = keys.get(key_index).unwrap(); + hash_key(build_hasher, key) + }); + EntryValuesDrain::empty(&mut self.values) + } + } + } + + /// Reorganizes the multimap to ensure maximum spatial locality and changes the key and value capacities to the + /// provided values. + /// + /// This function can be used to actually increase the capacity of the multimap. + /// + /// Complexity: O(|K| + |V|) where |K| is the number of keys and |V| is the number of values. + /// + /// # Panics + /// + /// Panics if either of the given minimum capacities are less than their current respective lengths. 
+ /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::with_capacity(10, 10); + /// + /// map.insert("key1", "value1"); + /// map.insert("key2", "value2"); + /// map.append("key2", "value3"); + /// map.append("key1", "value4"); + /// map.pack_to(5, 5); + /// + /// assert_eq!(map.keys_capacity(), 5); + /// assert_eq!(map.keys_len(), 2); + /// assert_eq!(map.values_capacity(), 5); + /// assert_eq!(map.values_len(), 4); + /// ``` + #[cfg(feature = "std")] + pub fn pack_to(&mut self, keys_minimum_capacity: usize, values_minimum_capacity: usize) + where + State: Default, + { + assert!( + keys_minimum_capacity >= self.keys_len(), + "cannot pack multimap keys lower than current length" + ); + assert!( + values_minimum_capacity >= self.values_len(), + "cannot pack multimap values lower than current length" + ); + + let key_map = self.keys.pack_to(keys_minimum_capacity); + let value_map = self.values.pack_to(values_minimum_capacity); + let mut map = HashMap::with_capacity_and_hasher(keys_minimum_capacity, DummyState); + let build_hasher = &self.build_hasher; + + for value_entry in self.values.iter_mut() { + value_entry.key_index = key_map[&value_entry.key_index]; + value_entry.next_index = value_entry.next_index.map(|index| value_map[&index]); + value_entry.previous_index = value_entry.previous_index.map(|index| value_map[&index]); + } + + for (key_index, mut map_entry) in self.map.drain() { + map_entry.head_index = value_map[&map_entry.head_index]; + map_entry.tail_index = value_map[&map_entry.tail_index]; + let key_index = key_map[&key_index]; + let key = self.keys.get(key_index).unwrap(); + let hash = hash_key(&self.build_hasher, key); + + match map.raw_entry_mut().from_hash(hash, |_| false) { + RawEntryMut::Vacant(entry) => { + let keys = &self.keys; + let _ = entry.insert_with_hasher(hash, key_index, map_entry, |&key_index| { + let key = keys.get(key_index).unwrap(); + hash_key(build_hasher, key) + }); + } + _ => panic!("expected vacant entry"), + } + } + + self.map = map; + } + + /// Reorganizes the multimap to ensure maximum spatial locality and removes any excess key and value capacity. + /// + /// Complexity: O(|K| + |V|) where |K| is the number of keys and |V| is the number of values. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::with_capacity(5, 5); + /// + /// map.insert("key1", "value1"); + /// map.insert("key2", "value2"); + /// map.append("key2", "value3"); + /// map.append("key1", "value4"); + /// map.pack_to_fit(); + /// + /// assert_eq!(map.keys_capacity(), 2); + /// assert_eq!(map.keys_len(), 2); + /// assert_eq!(map.values_capacity(), 4); + /// assert_eq!(map.values_len(), 4); + /// ``` + #[cfg(feature = "std")] + pub fn pack_to_fit(&mut self) + where + State: Default, + { + self.pack_to(self.keys_len(), self.values_len()); + } + + /// Removes the last key-value pair to have been inserted. + /// + /// Because a single key can be associated with many values, the key returned by this function is a [`KeyWrapper`] + /// which can be either owned or borrowed. If the value removed was the only value associated with the key, then the + /// key will be returned. Otherwise, a reference to the key will be returned. + /// + /// This function along with [`ListOrderedMultimap::pop_front`] act as replacements for a drain iterator since an + /// iterator cannot be done over [`KeyWrapper`]. 
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use ordered_multimap::list_ordered_multimap::KeyWrapper;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  ///
+  /// map.insert("key", "value1");
+  /// map.append("key", "value2");
+  ///
+  /// let (key, value) = map.pop_back().unwrap();
+  /// assert_eq!(key, KeyWrapper::Borrowed(&"key"));
+  /// assert_eq!(&value, &"value2");
+  ///
+  /// let (key, value) = map.pop_back().unwrap();
+  /// assert_eq!(key, KeyWrapper::Owned("key"));
+  /// assert_eq!(&value, &"value1");
+  /// ```
+  pub fn pop_back(&mut self) -> Option<(KeyWrapper<'_, Key>, Value)> {
+    let value_entry = self.values.pop_back()?;
+
+    let key_wrapper = match value_entry.previous_index {
+      Some(previous_index) => {
+        let key = self.keys.get(value_entry.key_index).unwrap();
+        let hash = hash_key(&self.build_hasher, &key);
+
+        let mut entry = match raw_entry_mut(&self.keys, &mut self.map, hash, key) {
+          RawEntryMut::Occupied(entry) => entry,
+          _ => panic!("expected occupied entry in internal map"),
+        };
+        let map_entry = entry.get_mut();
+        map_entry.length -= 1;
+        map_entry.tail_index = previous_index;
+
+        let previous_value_entry = self.values.get_mut(previous_index).unwrap();
+        previous_value_entry.next_index = None;
+
+        KeyWrapper::Borrowed(key)
+      }
+      None => {
+        let key = self.keys.remove(value_entry.key_index).unwrap();
+        let hash = hash_key(&self.build_hasher, &key);
+
+        match raw_entry_mut_empty(&self.keys, &mut self.map, hash) {
+          RawEntryMut::Occupied(entry) => {
+            let _ = entry.remove();
+          }
+          _ => panic!("expected occupied entry in internal map"),
+        }
+
+        KeyWrapper::Owned(key)
+      }
+    };
+
+    Some((key_wrapper, value_entry.value))
+  }
+
+  /// Removes the first key-value pair to have been inserted.
+  ///
+  /// Because a single key can be associated with many values, the key returned by this function is a [`KeyWrapper`]
+  /// which can be either owned or borrowed. If the value removed was the only value associated with the key, then the
+  /// key will be returned. Otherwise, a reference to the key will be returned.
+  ///
+  /// This function along with [`ListOrderedMultimap::pop_back`] act as replacements for a drain iterator since an
+  /// iterator cannot be done over [`KeyWrapper`].
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use ordered_multimap::list_ordered_multimap::KeyWrapper;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  ///
+  /// map.insert("key", "value1");
+  /// map.append("key", "value2");
+  ///
+  /// let (key, value) = map.pop_front().unwrap();
+  /// assert_eq!(key, KeyWrapper::Borrowed(&"key"));
+  /// assert_eq!(&value, &"value1");
+  ///
+  /// let (key, value) = map.pop_front().unwrap();
+  /// assert_eq!(key, KeyWrapper::Owned("key"));
+  /// assert_eq!(&value, &"value2");
+  /// ```
+  pub fn pop_front(&mut self) -> Option<(KeyWrapper<'_, Key>, Value)> {
+    let value_entry = self.values.pop_front()?;
+
+    let key_wrapper = match value_entry.next_index {
+      Some(next_index) => {
+        let key = self.keys.get(value_entry.key_index).unwrap();
+        let hash = hash_key(&self.build_hasher, &key);
+
+        let mut entry = match raw_entry_mut(&self.keys, &mut self.map, hash, key) {
+          RawEntryMut::Occupied(entry) => entry,
+          _ => panic!("expected occupied entry in internal map"),
+        };
+        let map_entry = entry.get_mut();
+        map_entry.length -= 1;
+        map_entry.head_index = next_index;
+
+        let next_value_entry = self.values.get_mut(next_index).unwrap();
+        next_value_entry.previous_index = None;
+
+        KeyWrapper::Borrowed(key)
+      }
+      None => {
+        let key = self.keys.remove(value_entry.key_index).unwrap();
+        let hash = hash_key(&self.build_hasher, &key);
+
+        match raw_entry_mut_empty(&self.keys, &mut self.map, hash) {
+          RawEntryMut::Occupied(entry) => {
+            let _ = entry.remove();
+          }
+          _ => panic!("expected occupied entry in internal map"),
+        }
+
+        KeyWrapper::Owned(key)
+      }
+    };
+
+    Some((key_wrapper, value_entry.value))
+  }
+
+  /// Removes all values associated with the given key from the map and returns the first value by insertion order.
+  ///
+  /// Complexity: O(1)
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  ///
+  /// let removed_value = map.remove(&"key");
+  /// assert_eq!(removed_value, None);
+  ///
+  /// map.insert("key", "value");
+  /// assert_eq!(map.get(&"key"), Some(&"value"));
+  ///
+  /// let removed_value = map.remove(&"key");
+  /// assert_eq!(removed_value, Some("value"));
+  /// assert_eq!(map.get(&"key"), None);
+  /// ```
+  pub fn remove<KeyQuery>(&mut self, key: &KeyQuery) -> Option<Value>
+  where
+    Key: Borrow<KeyQuery>,
+    KeyQuery: ?Sized + Eq + Hash,
+  {
+    self.remove_entry(key).map(|(_, value)| value)
+  }
+
+  /// Removes all values associated with the given key from the map and returns an iterator that yields those values.
+  ///
+  /// If the key is not already in the map, the iterator will yield no values.
+ /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// { + /// let mut removed_values = map.remove_all(&"key"); + /// assert_eq!(removed_values.next(), None); + /// } + /// + /// map.insert("key", "value1"); + /// map.append("key", "value2"); + /// assert_eq!(map.get(&"key"), Some(&"value1")); + /// + /// { + /// let mut removed_values = map.remove_all(&"key"); + /// assert_eq!(removed_values.next(), Some("value1")); + /// assert_eq!(removed_values.next(), Some("value2")); + /// assert_eq!(removed_values.next(), None); + /// } + /// + /// assert_eq!(map.get(&"key"), None); + /// ``` + pub fn remove_all(&mut self, key: &KeyQuery) -> EntryValuesDrain<'_, Key, Value> + where + Key: Borrow, + KeyQuery: ?Sized + Eq + Hash, + { + let hash = hash_key(&self.build_hasher, &key); + let entry = raw_entry_mut(&self.keys, &mut self.map, hash, key); + + match entry { + RawEntryMut::Occupied(entry) => { + let (key_index, map_entry) = entry.remove_entry(); + let _ = self.keys.remove(key_index).unwrap(); + EntryValuesDrain::from_map_entry(&mut self.values, &map_entry) + } + RawEntryMut::Vacant(_) => EntryValuesDrain::empty(&mut self.values), + } + } + + /// Removes all values associated with the given key from the map and returns the key and first value. + /// + /// If the key is not already in the map, then `None` will be returned. + /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// let entry = map.remove_entry(&"key"); + /// assert_eq!(entry, None); + /// + /// map.insert("key", "value"); + /// assert_eq!(map.get(&"key"), Some(&"value")); + /// + /// let entry = map.remove_entry(&"key"); + /// assert_eq!(entry, Some(("key", "value"))); + /// assert_eq!(map.get(&"key"), None); + /// ``` + pub fn remove_entry(&mut self, key: &KeyQuery) -> Option<(Key, Value)> + where + Key: Borrow, + KeyQuery: ?Sized + Eq + Hash, + { + let (key, mut iter) = self.remove_entry_all(key)?; + Some((key, iter.next().unwrap())) + } + + /// Removes all values associated with the given key from the map and returns the key and an iterator that yields + /// those values. + /// + /// If the key is not already in the map, then `None` will be returned. 
+ /// + /// Complexity: O(1) + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// { + /// let entry = map.remove_entry_all(&"key"); + /// assert!(entry.is_none()); + /// } + /// + /// map.insert("key", "value1"); + /// map.append("key", "value2"); + /// assert_eq!(map.get(&"key"), Some(&"value1")); + /// + /// { + /// let (key, mut iter) = map.remove_entry_all(&"key").unwrap(); + /// assert_eq!(key, "key"); + /// assert_eq!(iter.next(), Some("value1")); + /// assert_eq!(iter.next(), Some("value2")); + /// assert_eq!(iter.next(), None); + /// } + /// + /// assert_eq!(map.get(&"key"), None); + /// ``` + pub fn remove_entry_all( + &mut self, + key: &KeyQuery, + ) -> Option<(Key, EntryValuesDrain<'_, Key, Value>)> + where + Key: Borrow, + KeyQuery: ?Sized + Eq + Hash, + { + let hash = hash_key(&self.build_hasher, &key); + let entry = raw_entry_mut(&self.keys, &mut self.map, hash, key); + + match entry { + RawEntryMut::Occupied(entry) => { + let (key_index, map_entry) = entry.remove_entry(); + let key = self.keys.remove(key_index).unwrap(); + let iter = EntryValuesDrain::from_map_entry(&mut self.values, &map_entry); + Some((key, iter)) + } + _ => None, + } + } + + /// Reserves additional capacity such that more keys can be stored in the multimap. + /// + /// If the existing capacity minus the current length is enough to satisfy the additional capacity, the capacity will + /// remain unchanged. + /// + /// If the capacity is increased, the capacity may be increased by more than what was requested. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::with_capacity(1, 1); + /// + /// map.insert("key", "value"); + /// assert_eq!(map.keys_capacity(), 1); + /// + /// map.reserve_keys(10); + /// assert!(map.keys_capacity() >= 11); + /// assert_eq!(map.get(&"key"), Some(&"value")); + /// ``` + pub fn reserve_keys(&mut self, additional_capacity: usize) { + if self.keys.capacity() - self.keys.len() >= additional_capacity { + return; + } + + let capacity = self.map.capacity() + additional_capacity; + let mut map = HashMap::with_capacity_and_hasher(capacity, DummyState); + + for (key_index, map_entry) in self.map.drain() { + let key = self.keys.get(key_index).unwrap(); + let hash = hash_key(&self.build_hasher, key); + let entry = match raw_entry_mut(&self.keys, &mut map, hash, key) { + RawEntryMut::Vacant(entry) => entry, + _ => panic!("expected vacant entry"), + }; + let _ = entry.insert_hashed_nocheck(hash, key_index, map_entry); + } + + self.keys.reserve(additional_capacity); + self.map = map; + } + + /// Keeps all key-value pairs that satisfy the given predicate function. 
+ /// + /// Complexity: O(|V|) where |V| is the number of values + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// map.insert("key1", 1); + /// map.insert("key2", 5); + /// map.append("key1", -1); + /// map.insert("key3", -10); + /// + /// map.retain(|_, &mut value| value >= 0); + /// + /// let mut iter = map.iter(); + /// assert_eq!(iter.next(), Some((&"key1", &1))); + /// assert_eq!(iter.next(), Some((&"key2", &5))); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn retain(&mut self, function: Function) + where + Function: FnMut(&Key, &mut Value) -> bool, + { + ListOrderedMultimap::retain_helper( + &self.build_hasher, + &mut self.keys, + &mut self.map, + &mut self.values, + function, + ); + } + + /// Helper function for [`ListOrderedMultimap::retain`] to deal with borrowing issues. + fn retain_helper<'map, Function>( + build_hasher: &'map State, + keys: &'map mut VecList, + map: &'map mut HashMap, MapEntry, DummyState>, + values: &'map mut VecList>, + mut function: Function, + ) where + Function: FnMut(&Key, &mut Value) -> bool, + { + let mut post_updates = vec![]; + + values.retain(|value_entry| { + let key = keys.get(value_entry.key_index).unwrap(); + + if function(key, &mut value_entry.value) { + true + } else { + let hash = hash_key(build_hasher, key); + let mut entry = match raw_entry_mut(keys, map, hash, key) { + RawEntryMut::Occupied(entry) => entry, + _ => panic!("expected occupied entry in internal map"), + }; + + if value_entry.previous_index.is_none() && value_entry.next_index.is_none() { + let _ = entry.remove(); + let _ = keys.remove(value_entry.key_index); + } else { + let map_entry = entry.get_mut(); + map_entry.length -= 1; + + if let Some(previous_index) = value_entry.previous_index { + post_updates.push((previous_index, None, Some(value_entry.next_index))); + } else { + map_entry.head_index = value_entry.next_index.unwrap(); + } + + if let Some(next_index) = value_entry.next_index { + post_updates.push((next_index, Some(value_entry.previous_index), None)); + } else { + map_entry.tail_index = value_entry.previous_index.unwrap(); + } + } + + false + } + }); + + for (index, new_previous_index, new_next_index) in post_updates { + let value_entry = values.get_mut(index).unwrap(); + + if let Some(new_previous_index) = new_previous_index { + value_entry.previous_index = new_previous_index; + } + + if let Some(new_next_index) = new_next_index { + value_entry.next_index = new_next_index; + } + } + } +} + +impl Debug for ListOrderedMultimap +where + Key: Debug, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.debug_map().entries(self.iter()).finish() + } +} + +#[cfg(feature = "std")] +impl Default for ListOrderedMultimap { + fn default() -> Self { + Self::new() + } +} + +impl Eq for ListOrderedMultimap +where + Key: Eq, + Value: PartialEq, +{ +} + +impl Extend<(Key, Value)> for ListOrderedMultimap +where + Key: Eq + Hash, + State: BuildHasher, +{ + fn extend(&mut self, iter: Iter) + where + Iter: IntoIterator, + { + let iter = iter.into_iter(); + self.reserve_values(iter.size_hint().0); + + for (key, value) in iter { + let _ = self.append(key, value); + } + } +} + +impl<'a, Key, Value, State> Extend<(&'a Key, &'a Value)> for ListOrderedMultimap +where + Key: Copy + Eq + Hash, + Value: Copy, + State: BuildHasher, +{ + fn extend(&mut self, iter: Iter) + where + Iter: IntoIterator, + { + self.extend(iter.into_iter().map(|(&key, 
&value)| (key, value)));
+  }
+}
+
+impl<Key, Value, State> FromIterator<(Key, Value)> for ListOrderedMultimap<Key, Value, State>
+where
+  Key: Eq + Hash,
+  State: BuildHasher + Default,
+{
+  fn from_iter<Iter>(iter: Iter) -> Self
+  where
+    Iter: IntoIterator<Item = (Key, Value)>,
+  {
+    let mut map = ListOrderedMultimap::with_hasher(State::default());
+    map.extend(iter);
+    map
+  }
+}
+
+impl<Key, Value, State> IntoIterator for ListOrderedMultimap<Key, Value, State>
+where
+  Key: Clone,
+{
+  type IntoIter = IntoIter<Key, Value>;
+  type Item = (Key, Value);
+
+  fn into_iter(self) -> Self::IntoIter {
+    IntoIter {
+      keys: self.keys,
+      iter: self.values.into_iter(),
+    }
+  }
+}
+
+impl<'map, Key, Value, State> IntoIterator for &'map ListOrderedMultimap<Key, Value, State> {
+  type IntoIter = Iter<'map, Key, Value>;
+  type Item = (&'map Key, &'map Value);
+
+  fn into_iter(self) -> Self::IntoIter {
+    self.iter()
+  }
+}
+
+impl<'map, Key, Value, State> IntoIterator for &'map mut ListOrderedMultimap<Key, Value, State> {
+  type IntoIter = IterMut<'map, Key, Value>;
+  type Item = (&'map Key, &'map mut Value);
+
+  fn into_iter(self) -> Self::IntoIter {
+    self.iter_mut()
+  }
+}
+
+impl<Key, Value, State> PartialEq for ListOrderedMultimap<Key, Value, State>
+where
+  Key: PartialEq,
+  Value: PartialEq,
+{
+  fn eq(&self, other: &ListOrderedMultimap<Key, Value, State>) -> bool {
+    if self.keys_len() != other.keys_len() || self.values_len() != other.values_len() {
+      return false;
+    }
+
+    self.iter().eq(other.iter())
+  }
+}
+
+/// A wrapper around a key that is either borrowed or owned.
+///
+/// This type is similar to [`std::borrow::Cow`] but does not require a [`Clone`] trait bound on the key.
+#[allow(single_use_lifetimes)]
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub enum KeyWrapper<'map, Key> {
+  /// An immutable reference to a key. This implies that the key is still associated with at least one value in the
+  /// multimap.
+  Borrowed(&'map Key),
+
+  /// An owned key. This will occur when a key is no longer associated with any values in the multimap.
+  Owned(Key),
+}
+
+impl<Key> KeyWrapper<'_, Key> {
+  /// If the wrapped key is owned, it is returned. Otherwise, the borrowed key is cloned and returned.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::list_ordered_multimap::KeyWrapper;
+  ///
+  /// let borrowed = KeyWrapper::Borrowed(&0);
+  /// assert_eq!(borrowed.into_owned(), 0);
+  ///
+  /// let owned = KeyWrapper::Owned(0);
+  /// assert_eq!(owned.into_owned(), 0);
+  /// ```
+  #[must_use]
+  pub fn into_owned(self) -> Key
+  where
+    Key: Clone,
+  {
+    match self {
+      KeyWrapper::Borrowed(key) => key.clone(),
+      KeyWrapper::Owned(key) => key,
+    }
+  }
+
+  /// Returns whether the wrapped key is borrowed.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::list_ordered_multimap::KeyWrapper;
+  ///
+  /// let borrowed = KeyWrapper::Borrowed(&0);
+  /// assert!(borrowed.is_borrowed());
+  ///
+  /// let owned = KeyWrapper::Owned(0);
+  /// assert!(!owned.is_borrowed());
+  /// ```
+  #[must_use]
+  pub fn is_borrowed(&self) -> bool {
+    matches!(self, KeyWrapper::Borrowed(_))
+  }
+
+  /// Returns whether the wrapped key is owned.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::list_ordered_multimap::KeyWrapper;
+  ///
+  /// let borrowed = KeyWrapper::Borrowed(&0);
+  /// assert!(!borrowed.is_owned());
+  ///
+  /// let owned = KeyWrapper::Owned(0);
+  /// assert!(owned.is_owned());
+  /// ```
+  #[must_use]
+  pub fn is_owned(&self) -> bool {
+    matches!(self, KeyWrapper::Owned(_))
+  }
+}
+
+/// The value type of the internal hash map.
+#[derive(Clone)]
+pub(crate) struct MapEntry<Key, Value> {
+  /// The index of the first value for this entry.
+  head_index: Index<ValueEntry<Key, Value>>,
+
+  /// The number of values for this entry.
+  length: usize,
+
+  /// The index of the last value for this entry.
+  tail_index: Index<ValueEntry<Key, Value>>,
+}
+
+impl<Key, Value> MapEntry<Key, Value> {
+  /// Convenience function for adding a new value to the entry.
+  pub fn append(&mut self, index: Index<ValueEntry<Key, Value>>) {
+    self.length += 1;
+    self.tail_index = index;
+  }
+
+  /// Convenience function for creating a new multimap entry.
+  #[must_use]
+  pub fn new(index: Index<ValueEntry<Key, Value>>) -> Self {
+    MapEntry {
+      head_index: index,
+      length: 1,
+      tail_index: index,
+    }
+  }
+
+  /// Convenience function for resetting the entry to contain only one value.
+  pub fn reset(&mut self, index: Index<ValueEntry<Key, Value>>) {
+    self.head_index = index;
+    self.length = 1;
+    self.tail_index = index;
+  }
+}
+
+/// The value entry that is contained within the internal values list.
+#[derive(Clone)]
+pub(crate) struct ValueEntry<Key, Value> {
+  /// The index of the key in the key list for this entry.
+  key_index: Index<Key>,
+
+  /// The index of the next value with the same key.
+  next_index: Option<Index<ValueEntry<Key, Value>>>,
+
+  /// The index of the previous value with the same key.
+  previous_index: Option<Index<ValueEntry<Key, Value>>>,
+
+  /// The actual value stored in this entry.
+  value: Value,
+}
+
+impl<Key, Value> ValueEntry<Key, Value> {
+  /// Convenience function for creating a new value entry.
+  #[must_use]
+  pub fn new(key_index: Index<Key>, value: Value) -> Self {
+    ValueEntry {
+      key_index,
+      next_index: None,
+      previous_index: None,
+      value,
+    }
+  }
+}
+
+/// A view into a single entry in the multimap, which may either be vacant or occupied.
+pub enum Entry<'map, Key, Value, State = RandomState> {
+  /// An occupied entry associated with one or more values.
+  Occupied(OccupiedEntry<'map, Key, Value>),
+
+  /// A vacant entry with no associated values.
+  Vacant(VacantEntry<'map, Key, Value, State>),
+}
+
+impl<'map, Key, Value, State> Entry<'map, Key, Value, State>
+where
+  Key: Eq + Hash,
+  State: BuildHasher,
+{
+  /// Calls the given function with a mutable reference to the first value of this entry, by insertion order, if it is
+  /// occupied; otherwise, this function is a no-op.
+  ///
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  ///
+  /// map.entry("key")
+  ///   .and_modify(|value| *value += 1)
+  ///   .or_insert(42);
+  /// assert_eq!(map.get(&"key"), Some(&42));
+  ///
+  /// map.entry("key")
+  ///   .and_modify(|value| *value += 1)
+  ///   .or_insert(42);
+  /// assert_eq!(map.get(&"key"), Some(&43));
+  /// ```
+  pub fn and_modify<Function>(self, function: Function) -> Self
+  where
+    Function: FnOnce(&mut Value),
+  {
+    match self {
+      Entry::Occupied(mut entry) => {
+        function(entry.get_mut());
+        Entry::Occupied(entry)
+      }
+      Entry::Vacant(entry) => Entry::Vacant(entry),
+    }
+  }
+
+  /// If the entry is vacant, the given value will be inserted into it and a mutable reference to that value will be
+  /// returned. Otherwise, a mutable reference to the first value, by insertion order, will be returned.
+ /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let value = map.entry("key").or_insert("value2"); + /// assert_eq!(value, &"value1"); + /// + /// let value = map.entry("key2").or_insert("value2"); + /// assert_eq!(value, &"value2"); + /// ``` + pub fn or_insert(self, value: Value) -> &'map mut Value { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(value), + } + } + + /// If the entry is vacant, the given value will be inserted into it and the new occupied entry will be returned. + /// Otherwise, the existing occupied entry will be returned. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let entry = map.entry("key").or_insert_entry("value2"); + /// assert_eq!(entry.into_mut(), &"value1"); + /// + /// let entry = map.entry("key2").or_insert_entry("value2"); + /// assert_eq!(entry.into_mut(), &"value2"); + /// ``` + pub fn or_insert_entry(self, value: Value) -> OccupiedEntry<'map, Key, Value> { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert_entry(value), + } + } + + /// If the entry is vacant, the value returned from the given function will be inserted into it and a mutable + /// reference to that value will be returned. Otherwise, a mutable reference to the first value, by insertion order, + /// will be returned. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let value = map.entry("key").or_insert_with(|| "value2"); + /// assert_eq!(value, &"value1"); + /// + /// let value = map.entry("key2").or_insert_with(|| "value2"); + /// assert_eq!(value, &"value2"); + /// ``` + pub fn or_insert_with(self, function: Function) -> &'map mut Value + where + Function: FnOnce() -> Value, + { + match self { + Entry::Occupied(entry) => entry.into_mut(), + Entry::Vacant(entry) => entry.insert(function()), + } + } + + /// If the entry is vacant, the value returned from the given function will be inserted into it and the new occupied + /// entry will be returned. Otherwise, the existing occupied entry will be returned. + /// + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let entry = map.entry("key").or_insert_with_entry(|| "value2"); + /// assert_eq!(entry.into_mut(), &"value1"); + /// + /// let entry = map.entry("key2").or_insert_with_entry(|| "value2"); + /// assert_eq!(entry.into_mut(), &"value2"); + /// ``` + pub fn or_insert_with_entry(self, function: Function) -> OccupiedEntry<'map, Key, Value> + where + Function: FnOnce() -> Value, + { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert_entry(function()), + } + } +} + +impl Debug for Entry<'_, Key, Value, State> +where + Key: Debug, + State: BuildHasher, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + match self { + Entry::Occupied(entry) => entry.fmt(formatter), + Entry::Vacant(entry) => entry.fmt(formatter), + } + } +} + +/// A view into an occupied entry in the multimap. 
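+// A short sketch of obtaining the occupied variant through `entry` (illustrative
+// only; it mirrors the doc examples on the methods below):
+//
+//     use ordered_multimap::list_ordered_multimap::Entry;
+//
+//     let mut map = ListOrderedMultimap::new();
+//     map.insert("key", "value1");
+//     let mut entry = match map.entry("key") {
+//         Entry::Occupied(entry) => entry,
+//         Entry::Vacant(_) => unreachable!(),
+//     };
+//     entry.append("value2");
+//     assert_eq!(entry.len(), 2);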
+pub struct OccupiedEntry<'map, Key, Value> {
+  entry: RawOccupiedEntryMut<'map, Index<Key>, MapEntry<Key, Value>, DummyState>,
+
+  keys: &'map mut VecList<Key>,
+
+  values: &'map mut VecList<ValueEntry<Key, Value>>,
+}
+
+#[allow(clippy::len_without_is_empty)]
+impl<'map, Key, Value> OccupiedEntry<'map, Key, Value> {
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use ordered_multimap::list_ordered_multimap::Entry;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key", "value1");
+  ///
+  /// let mut entry = match map.entry("key") {
+  ///   Entry::Occupied(entry) => entry,
+  ///   _ => panic!("expected occupied entry")
+  /// };
+  ///
+  /// entry.append("value2");
+  ///
+  /// let mut iter = map.get_all(&"key");
+  /// assert_eq!(iter.next(), Some(&"value1"));
+  /// assert_eq!(iter.next(), Some(&"value2"));
+  /// assert_eq!(iter.next(), None);
+  /// ```
+  pub fn append(&mut self, value: Value) {
+    let key_index = *self.entry.key();
+    let map_entry = self.entry.get_mut();
+    let mut value_entry = ValueEntry::new(key_index, value);
+    value_entry.previous_index = Some(map_entry.tail_index);
+    let index = self.values.push_back(value_entry);
+    self
+      .values
+      .get_mut(map_entry.tail_index)
+      .unwrap()
+      .next_index = Some(index);
+    map_entry.length += 1;
+    map_entry.tail_index = index;
+  }
+
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use ordered_multimap::list_ordered_multimap::Entry;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key", "value");
+  ///
+  /// let mut entry = match map.entry("key") {
+  ///   Entry::Occupied(entry) => entry,
+  ///   _ => panic!("expected occupied entry")
+  /// };
+  ///
+  /// assert_eq!(entry.get(), &"value");
+  /// ```
+  #[must_use]
+  pub fn get(&self) -> &Value {
+    let index = self.entry.get().head_index;
+    &self.values.get(index).unwrap().value
+  }
+
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use ordered_multimap::list_ordered_multimap::Entry;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key", "value");
+  ///
+  /// let mut entry = match map.entry("key") {
+  ///   Entry::Occupied(entry) => entry,
+  ///   _ => panic!("expected occupied entry")
+  /// };
+  ///
+  /// assert_eq!(entry.get_mut(), &mut "value");
+  /// ```
+  #[must_use]
+  pub fn get_mut(&mut self) -> &mut Value {
+    let index = self.entry.get().head_index;
+    &mut self.values.get_mut(index).unwrap().value
+  }
+
+  /// # Examples
+  ///
+  /// ```
+  /// use ordered_multimap::ListOrderedMultimap;
+  /// use ordered_multimap::list_ordered_multimap::Entry;
+  ///
+  /// let mut map = ListOrderedMultimap::new();
+  /// map.insert("key", "value1");
+  ///
+  /// let mut entry = match map.entry("key") {
+  ///   Entry::Occupied(entry) => entry,
+  ///   _ => panic!("expected occupied entry")
+  /// };
+  ///
+  /// entry.insert("value2");
+  ///
+  /// assert_eq!(map.get(&"key"), Some(&"value2"));
+  /// ```
+  pub fn insert(&mut self, value: Value) -> Value {
+    let key_index = *self.entry.key();
+    let map_entry = self.entry.get_mut();
+    let first_index = map_entry.head_index;
+    let mut entry = self.values.remove(first_index).unwrap();
+    let first_value = entry.value;
+
+    while let Some(next_index) = entry.next_index {
+      entry = self.values.remove(next_index).unwrap();
+    }
+
+    let value_entry = ValueEntry::new(key_index, value);
+    let index = self.values.push_back(value_entry);
+    map_entry.head_index = index;
+    map_entry.length = 1;
+    map_entry.tail_index = index;
+    first_value
+  }
+
+  /// #
Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// entry.append("value2"); + /// + /// let mut iter = entry.insert_all("value3"); + /// assert_eq!(iter.next(), Some("value1")); + /// assert_eq!(iter.next(), Some("value2")); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn insert_all(&mut self, value: Value) -> EntryValuesDrain<'_, Key, Value> { + let key_index = *self.entry.key(); + let map_entry = self.entry.get_mut(); + let value_entry = ValueEntry::new(key_index, value); + let index = self.values.push_back(value_entry); + let iter = EntryValuesDrain::from_map_entry(self.values, map_entry); + map_entry.reset(index); + iter + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// assert_eq!(entry.into_mut(), &mut "value"); + /// ``` + #[must_use] + pub fn into_mut(mut self) -> &'map mut Value { + let index = self.entry.get_mut().head_index; + &mut self.values.get_mut(index).unwrap().value + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// entry.append("value2"); + /// + /// let mut iter = entry.iter(); + /// assert_eq!(iter.next(), Some(&"value1")); + /// assert_eq!(iter.next(), Some(&"value2")); + /// assert_eq!(iter.next(), None); + /// ``` + #[must_use] + pub fn iter(&self) -> EntryValues<'_, Key, Value> { + let map_entry = self.entry.get(); + EntryValues::from_map_entry(self.values, map_entry) + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// entry.append("value2"); + /// + /// let mut iter = entry.iter_mut(); + /// assert_eq!(iter.next(), Some(&mut "value1")); + /// assert_eq!(iter.next(), Some(&mut "value2")); + /// assert_eq!(iter.next(), None); + /// ``` + #[must_use] + pub fn iter_mut(&mut self) -> EntryValuesMut<'_, Key, Value> { + let map_entry = self.entry.get_mut(); + EntryValuesMut::from_map_entry(self.values, map_entry) + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// assert_eq!(entry.key(), &"key"); + /// 
``` + #[must_use] + pub fn key(&self) -> &Key { + let key_index = self.entry.key(); + self.keys.get(*key_index).unwrap() + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// assert_eq!(entry.len(), 1); + /// + /// entry.append("value2"); + /// assert_eq!(entry.len(), 2); + /// ``` + #[must_use] + pub fn len(&self) -> usize { + self.entry.get().length + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// assert_eq!(entry.remove(), "value"); + /// ``` + pub fn remove(self) -> Value { + self.remove_entry().1 + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// entry.append("value2"); + /// + /// let mut iter = entry.remove_all(); + /// assert_eq!(iter.next(), Some("value1")); + /// assert_eq!(iter.next(), Some("value2")); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn remove_all(self) -> EntryValuesDrain<'map, Key, Value> { + self.remove_entry_all().1 + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// assert_eq!(entry.remove_entry(), ("key", "value")); + /// ``` + pub fn remove_entry(self) -> (Key, Value) { + let (key_index, map_entry) = self.entry.remove_entry(); + let key = self.keys.remove(key_index).unwrap(); + let first_index = map_entry.head_index; + let mut entry = self.values.remove(first_index).unwrap(); + let first_value = entry.value; + + while let Some(next_index) = entry.next_index { + entry = self.values.remove(next_index).unwrap(); + } + + (key, first_value) + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// map.insert("key", "value1"); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Occupied(entry) => entry, + /// _ => panic!("expected occupied entry") + /// }; + /// + /// entry.append("value2"); + /// + /// let (key, mut iter) = entry.remove_entry_all(); + /// assert_eq!(key, "key"); + /// assert_eq!(iter.next(), Some("value1")); + /// assert_eq!(iter.next(), Some("value2")); + /// assert_eq!(iter.next(), None); + /// ``` + pub fn remove_entry_all(self) -> (Key, EntryValuesDrain<'map, Key, Value>) { + let (key_index, map_entry) = self.entry.remove_entry(); + let 
key = self.keys.remove(key_index).unwrap(); + let iter = EntryValuesDrain { + head_index: Some(map_entry.head_index), + remaining: map_entry.length, + tail_index: Some(map_entry.tail_index), + values: self.values, + }; + (key, iter) + } +} + +impl Debug for OccupiedEntry<'_, Key, Value> +where + Key: Debug, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter + .debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("values", &self.iter()) + .finish() + } +} + +/// A view into a vacant entry in the multimap. +pub struct VacantEntry<'map, Key, Value, State = RandomState> { + /// The builder hasher for the map, kept separately for mutability concerns. + build_hasher: &'map State, + + /// The hash of the key for the entry. + hash: u64, + + /// The key for this entry for when it is to be inserted into the map. + key: Key, + + keys: &'map mut VecList, + + /// Reference to the multimap. + map: &'map mut HashMap, MapEntry, DummyState>, + + values: &'map mut VecList>, +} + +impl<'map, Key, Value, State> VacantEntry<'map, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Vacant(entry) => entry, + /// _ => panic!("expected vacant entry") + /// }; + /// + /// assert_eq!(entry.insert("value"), &"value"); + /// ``` + pub fn insert(self, value: Value) -> &'map mut Value { + let build_hasher = self.build_hasher; + let entry = match raw_entry_mut(self.keys, self.map, self.hash, &self.key) { + RawEntryMut::Vacant(entry) => entry, + _ => panic!("expected vacant entry"), + }; + let key_index = self.keys.push_back(self.key); + let value_entry = ValueEntry::new(key_index, value); + let index = self.values.push_back(value_entry); + let map_entry = MapEntry::new(index); + let keys = &self.keys; + let _ = entry.insert_with_hasher(self.hash, key_index, map_entry, |&key_index| { + let key = keys.get(key_index).unwrap(); + hash_key(build_hasher, key) + }); + + &mut self.values.get_mut(index).unwrap().value + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map = ListOrderedMultimap::new(); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Vacant(entry) => entry, + /// _ => panic!("expected vacant entry") + /// }; + /// + /// let mut entry = entry.insert_entry("value"); + /// assert_eq!(entry.get(), &"value"); + /// ``` + pub fn insert_entry(self, value: Value) -> OccupiedEntry<'map, Key, Value> { + let build_hasher = self.build_hasher; + let entry = match raw_entry_mut(self.keys, self.map, self.hash, &self.key) { + RawEntryMut::Vacant(entry) => entry, + _ => panic!("expected vacant entry"), + }; + let key_index = self.keys.push_back(self.key); + let value_entry = ValueEntry::new(key_index, value); + let index = self.values.push_back(value_entry); + let map_entry = MapEntry::new(index); + let keys = &self.keys; + let _ = entry.insert_with_hasher(self.hash, key_index, map_entry, |&key_index| { + let key = keys.get(key_index).unwrap(); + hash_key(build_hasher, key) + }); + + let key = self.keys.get(key_index).unwrap(); + let entry = match raw_entry_mut(self.keys, self.map, self.hash, key) { + RawEntryMut::Occupied(entry) => entry, + _ => panic!("expected occupied 
entry"), + }; + + OccupiedEntry { + entry, + keys: self.keys, + values: self.values, + } + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Vacant(entry) => entry, + /// _ => panic!("expected vacant entry") + /// }; + /// + /// assert_eq!(entry.into_key(), "key"); + /// ``` + #[must_use] + pub fn into_key(self) -> Key { + self.key + } + + /// # Examples + /// + /// ``` + /// use ordered_multimap::ListOrderedMultimap; + /// use ordered_multimap::list_ordered_multimap::Entry; + /// + /// let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + /// + /// let mut entry = match map.entry("key") { + /// Entry::Vacant(entry) => entry, + /// _ => panic!("expected vacant entry") + /// }; + /// + /// assert_eq!(entry.key(), &"key"); + /// ``` + #[must_use] + pub fn key(&self) -> &Key { + &self.key + } +} + +impl Debug for VacantEntry<'_, Key, Value, State> +where + Key: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter + .debug_tuple("VacantEntry") + .field(&self.key) + .finish() + } +} + +/// An iterator that yields immutable references to all values of a given key. The order of the values is always in the +/// order that they were inserted. +pub struct EntryValues<'map, Key, Value> { + /// The first index of the values not yet yielded. + head_index: Option>>, + + /// The remaining number of values to be yielded. + remaining: usize, + + /// The last index of the values not yet yielded. + tail_index: Option>>, + + /// The list of the values in the map. This is ordered by time of insertion. + values: &'map VecList>, +} + +impl<'map, Key, Value> EntryValues<'map, Key, Value> { + /// Convenience function for creating an empty iterator. + #[must_use] + fn empty(values: &'map VecList>) -> Self { + EntryValues { + head_index: None, + remaining: 0, + tail_index: None, + values, + } + } + + /// Convenience function for creating a new iterator from a map entry. 
+ #[must_use] + fn from_map_entry( + values: &'map VecList>, + map_entry: &MapEntry, + ) -> Self { + EntryValues { + head_index: Some(map_entry.head_index), + remaining: map_entry.length, + tail_index: Some(map_entry.tail_index), + values, + } + } +} + +impl<'map, Key, Value> Clone for EntryValues<'map, Key, Value> { + fn clone(&self) -> EntryValues<'map, Key, Value> { + EntryValues { + head_index: self.head_index, + remaining: self.remaining, + tail_index: self.tail_index, + values: self.values, + } + } +} + +impl Debug for EntryValues<'_, Key, Value> +where + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("EntryValues(")?; + formatter.debug_list().entries(self.clone()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for EntryValues<'_, Key, Value> { + fn next_back(&mut self) -> Option { + if self.remaining == 0 { + None + } else { + self.tail_index.map(|index| { + let entry = self.values.get(index).unwrap(); + self.tail_index = entry.previous_index; + self.remaining -= 1; + &entry.value + }) + } + } +} + +impl ExactSizeIterator for EntryValues<'_, Key, Value> {} + +impl FusedIterator for EntryValues<'_, Key, Value> {} + +impl<'map, Key, Value> Iterator for EntryValues<'map, Key, Value> { + type Item = &'map Value; + + fn next(&mut self) -> Option { + if self.remaining == 0 { + None + } else { + self.head_index.map(|index| { + let entry = self.values.get(index).unwrap(); + self.head_index = entry.next_index; + self.remaining -= 1; + &entry.value + }) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.remaining, Some(self.remaining)) + } +} + +/// An iterator that moves all values of a given key out of a multimap but preserves the underlying capacity. The order +/// of the values is always in the order that they were inserted. +pub struct EntryValuesDrain<'map, Key, Value> { + /// The first index of the values not yet yielded. + head_index: Option>>, + + /// The remaining number of values to be yielded. + remaining: usize, + + /// The last index of the values not yet yielded. + tail_index: Option>>, + + /// The list of the values in the map. This is ordered by time of insertion. + values: &'map mut VecList>, +} + +impl<'map, Key, Value> EntryValuesDrain<'map, Key, Value> { + /// Convenience function for creating an empty iterator. + fn empty(values: &'map mut VecList>) -> Self { + EntryValuesDrain { + head_index: None, + remaining: 0, + tail_index: None, + values, + } + } + + /// Convenience function for creating a new iterator from a map entry. + fn from_map_entry( + values: &'map mut VecList>, + map_entry: &MapEntry, + ) -> Self { + EntryValuesDrain { + head_index: Some(map_entry.head_index), + remaining: map_entry.length, + tail_index: Some(map_entry.tail_index), + values, + } + } + + /// Creates an iterator that yields immutable references to all values of a given key. 
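+  // Because the `Drop` impl below exhausts the iterator, dropping a partially
+  // consumed drain still detaches every value for the key (sketch, using the
+  // `remove_all` accessor exercised in the tests below):
+  //
+  //     let mut map = ListOrderedMultimap::new();
+  //     map.insert("key", 1);
+  //     map.append("key", 2);
+  //     drop(map.remove_all(&"key"));
+  //     assert_eq!(map.get(&"key"), None);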
+ #[must_use] + pub fn iter(&self) -> EntryValues<'_, Key, Value> { + EntryValues { + head_index: self.head_index, + remaining: self.remaining, + tail_index: self.tail_index, + values: self.values, + } + } +} + +impl Debug for EntryValuesDrain<'_, Key, Value> +where + Key: Debug, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("EntryValuesDrain(")?; + formatter.debug_list().entries(self.iter()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for EntryValuesDrain<'_, Key, Value> { + fn next_back(&mut self) -> Option { + if self.remaining == 0 { + None + } else { + self.tail_index.map(|index| { + let entry = self.values.remove(index).unwrap(); + self.tail_index = entry.previous_index; + self.remaining -= 1; + entry.value + }) + } + } +} + +impl Drop for EntryValuesDrain<'_, Key, Value> { + fn drop(&mut self) { + for _ in self {} + } +} + +impl ExactSizeIterator for EntryValuesDrain<'_, Key, Value> {} + +impl FusedIterator for EntryValuesDrain<'_, Key, Value> {} + +impl Iterator for EntryValuesDrain<'_, Key, Value> { + type Item = Value; + + fn next(&mut self) -> Option { + if self.remaining == 0 { + None + } else { + self.head_index.map(|index| { + let entry = self.values.remove(index).unwrap(); + self.head_index = entry.next_index; + self.remaining -= 1; + entry.value + }) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.remaining, Some(self.remaining)) + } +} + +/// An iterator that yields mutable references to all values of a given key. The order of the values is always in the +/// order that they were inserted. +pub struct EntryValuesMut<'map, Key, Value> { + /// The first index of the values not yet yielded. + head_index: Option>>, + + /// Because [`EntryValuesMut::values`] is a pointer, we need to have a phantom data here for the lifetime parameter. + phantom: PhantomData<&'map mut VecList>>, + + /// The remaining number of values to be yielded. + remaining: usize, + + /// The last index of the values not yet yielded. + tail_index: Option>>, + + /// The list of the values in the map. This is ordered by time of insertion. + values: *mut VecList>, +} + +impl<'map, Key, Value> EntryValuesMut<'map, Key, Value> { + /// Convenience function for creating an empty iterator. + #[must_use] + fn empty(values: &'map mut VecList>) -> Self { + EntryValuesMut { + head_index: None, + phantom: PhantomData, + remaining: 0, + tail_index: None, + values, + } + } + + /// Convenience function for creating a new iterator from a map entry. + #[must_use] + fn from_map_entry( + values: &'map mut VecList>, + map_entry: &MapEntry, + ) -> Self { + EntryValuesMut { + head_index: Some(map_entry.head_index), + phantom: PhantomData, + remaining: map_entry.length, + tail_index: Some(map_entry.tail_index), + values, + } + } + + /// Creates an iterator that yields immutable references to all values of a given key. 
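+  // The raw `values` pointer lets `next` hand out `&'map mut Value` borrows
+  // that outlive the `&mut self` borrow of the iterator itself; `PhantomData`
+  // keeps the iterator tied to the `'map` lifetime. Mutation sketch
+  // (illustrative; `get_all_mut` is exercised in the tests below):
+  //
+  //     let mut map = ListOrderedMultimap::new();
+  //     map.insert("key", 1);
+  //     map.append("key", 2);
+  //     for value in map.get_all_mut(&"key") {
+  //         *value *= 10;
+  //     }
+  //     assert_eq!(map.get(&"key"), Some(&10));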
+ #[must_use] + pub fn iter(&self) -> EntryValues<'_, Key, Value> { + EntryValues { + head_index: self.head_index, + remaining: self.remaining, + tail_index: self.tail_index, + values: unsafe { &*self.values }, + } + } +} + +impl Debug for EntryValuesMut<'_, Key, Value> +where + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("EntryValuesMut(")?; + formatter.debug_list().entries(self.iter()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for EntryValuesMut<'_, Key, Value> { + fn next_back(&mut self) -> Option { + if self.remaining == 0 { + None + } else { + self.tail_index.map(|index| { + let entry = unsafe { (*self.values).get_mut(index) }.unwrap(); + self.tail_index = entry.previous_index; + self.remaining -= 1; + &mut entry.value + }) + } + } +} + +impl ExactSizeIterator for EntryValuesMut<'_, Key, Value> {} + +impl FusedIterator for EntryValuesMut<'_, Key, Value> {} + +impl<'map, Key, Value> Iterator for EntryValuesMut<'map, Key, Value> { + type Item = &'map mut Value; + + fn next(&mut self) -> Option { + if self.remaining == 0 { + None + } else { + self.head_index.map(|index| { + let entry = unsafe { (*self.values).get_mut(index) }.unwrap(); + self.head_index = entry.next_index; + self.remaining -= 1; + &mut entry.value + }) + } + } + + fn size_hint(&self) -> (usize, Option) { + (self.remaining, Some(self.remaining)) + } +} + +unsafe impl Send for EntryValuesMut<'_, Key, Value> +where + Key: Send, + Value: Send, +{ +} + +unsafe impl Sync for EntryValuesMut<'_, Key, Value> +where + Key: Sync, + Value: Sync, +{ +} + +/// An iterator that owns and yields all key-value pairs in a multimap by cloning the keys for their possibly multiple +/// values. This is unnecessarily expensive whenever [`Iter`] or [`IterMut`] would suit as well. The order of the +/// yielded items is always in the order that they were inserted. +pub struct IntoIter { + // The list of the keys in the map. This is ordered by time of insertion. + keys: VecList, + + /// The iterator over the list of all values. This is ordered by time of insertion. + iter: VecListIntoIter>, +} + +impl IntoIter { + /// Creates an iterator that yields immutable references to all key-value pairs in a multimap. + #[must_use] + pub fn iter(&self) -> Iter<'_, Key, Value> { + Iter { + keys: &self.keys, + iter: self.iter.iter(), + } + } +} + +impl Debug for IntoIter +where + Key: Debug, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("IntoIter(")?; + formatter.debug_list().entries(self.iter()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for IntoIter +where + Key: Clone, +{ + fn next_back(&mut self) -> Option { + let value_entry = self.iter.next_back()?; + let key = self.keys.get(value_entry.key_index).cloned().unwrap(); + Some((key, value_entry.value)) + } +} + +impl ExactSizeIterator for IntoIter where Key: Clone {} + +impl FusedIterator for IntoIter where Key: Clone {} + +impl Iterator for IntoIter +where + Key: Clone, +{ + type Item = (Key, Value); + + fn next(&mut self) -> Option { + let value_entry = self.iter.next()?; + let key = self.keys.get(value_entry.key_index).cloned().unwrap(); + Some((key, value_entry.value)) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +/// An iterator that yields immutable references to all key-value pairs in a multimap. The order of the yielded items is +/// always in the order that they were inserted. 
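+// Borrowing iteration avoids the per-value key cloning described above (sketch):
+//
+//     let mut map = ListOrderedMultimap::new();
+//     map.insert("key", "value");
+//     for (_key, _value) in &map {
+//         // Yields (&Key, &Value); nothing is cloned.
+//     }
+//     for (_key, _value) in map {
+//         // Yields (Key, Value); the key is cloned for each of its values and
+//         // `map` is consumed.
+//     }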
+pub struct Iter<'map, Key, Value> { + // The list of the keys in the map. This is ordered by time of insertion. + keys: &'map VecList, + + /// The iterator over the list of all values. This is ordered by time of insertion. + iter: VecListIter<'map, ValueEntry>, +} + +impl<'map, Key, Value> Clone for Iter<'map, Key, Value> { + fn clone(&self) -> Iter<'map, Key, Value> { + Iter { + keys: self.keys, + iter: self.iter.clone(), + } + } +} + +impl Debug for Iter<'_, Key, Value> +where + Key: Debug, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("Iter(")?; + formatter.debug_list().entries(self.clone()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for Iter<'_, Key, Value> { + fn next_back(&mut self) -> Option { + let value_entry = self.iter.next_back()?; + let key = self.keys.get(value_entry.key_index).unwrap(); + Some((key, &value_entry.value)) + } +} + +impl ExactSizeIterator for Iter<'_, Key, Value> {} + +impl FusedIterator for Iter<'_, Key, Value> {} + +impl<'map, Key, Value> Iterator for Iter<'map, Key, Value> { + type Item = (&'map Key, &'map Value); + + fn next(&mut self) -> Option { + let value_entry = self.iter.next()?; + let key = self.keys.get(value_entry.key_index).unwrap(); + Some((key, &value_entry.value)) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +/// An iterator that yields mutable references to all key-value pairs in a multimap. The order of the yielded items is +/// always in the order that they were inserted. +pub struct IterMut<'map, Key, Value> { + // The list of the keys in the map. This is ordered by time of insertion. + keys: &'map VecList, + + /// The iterator over the list of all values. This is ordered by time of insertion. + iter: VecListIterMut<'map, ValueEntry>, +} + +impl IterMut<'_, Key, Value> { + /// Creates an iterator that yields immutable references to all key-value pairs in a multimap. + #[must_use] + pub fn iter(&self) -> Iter<'_, Key, Value> { + Iter { + keys: self.keys, + iter: self.iter.iter(), + } + } +} + +impl Debug for IterMut<'_, Key, Value> +where + Key: Debug, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("IterMut(")?; + formatter.debug_list().entries(self.iter()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for IterMut<'_, Key, Value> { + fn next_back(&mut self) -> Option { + let value_entry = self.iter.next_back()?; + let key = self.keys.get(value_entry.key_index).unwrap(); + Some((key, &mut value_entry.value)) + } +} + +impl ExactSizeIterator for IterMut<'_, Key, Value> {} + +impl FusedIterator for IterMut<'_, Key, Value> {} + +impl<'map, Key, Value> Iterator for IterMut<'map, Key, Value> { + type Item = (&'map Key, &'map mut Value); + + fn next(&mut self) -> Option { + let value_entry = self.iter.next()?; + let key = self.keys.get(value_entry.key_index).unwrap(); + Some((key, &mut value_entry.value)) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +/// An iterator that yields immutable references to all keys and their value iterators. The order of the yielded items +/// is always in the order the keys were first inserted. +pub struct KeyValues<'map, Key, Value, State = RandomState> { + /// The builder hasher for the map, kept separately for mutability concerns. + build_hasher: &'map State, + + // The list of the keys in the map. This is ordered by time of insertion. 
+ keys: &'map VecList, + + /// The iterator over the list of all values. This is ordered by time of insertion. + iter: VecListIter<'map, Key>, + + /// The internal mapping from key hashes to associated value indices. + map: &'map HashMap, MapEntry, DummyState>, + + /// The list of the values in the map. This is ordered by time of insertion. + values: &'map VecList>, +} + +impl<'map, Key, Value, State> Clone for KeyValues<'map, Key, Value, State> { + fn clone(&self) -> KeyValues<'map, Key, Value, State> { + KeyValues { + build_hasher: self.build_hasher, + keys: self.keys, + iter: self.iter.clone(), + map: self.map, + values: self.values, + } + } +} + +impl Debug for KeyValues<'_, Key, Value, State> +where + Key: Debug + Eq + Hash, + State: BuildHasher, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("KeyValues(")?; + formatter.debug_list().entries(self.clone()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for KeyValues<'_, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ + fn next_back(&mut self) -> Option { + let key = self.iter.next_back()?; + let hash = hash_key(self.build_hasher, key); + let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap(); + let iter = EntryValues::from_map_entry(self.values, map_entry); + Some((key, iter)) + } +} + +impl ExactSizeIterator for KeyValues<'_, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ +} + +impl FusedIterator for KeyValues<'_, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ +} + +impl<'map, Key, Value, State> Iterator for KeyValues<'map, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ + type Item = (&'map Key, EntryValues<'map, Key, Value>); + + fn next(&mut self) -> Option { + let key = self.iter.next()?; + let hash = hash_key(self.build_hasher, key); + let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap(); + let iter = EntryValues::from_map_entry(self.values, map_entry); + Some((key, iter)) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +/// An iterator that yields mutable references to all keys and their value iterators. The order of the yielded items is +/// always in the order the keys were first inserted. +pub struct KeyValuesMut<'map, Key, Value, State = RandomState> { + /// The builder hasher for the map, kept separately for mutability concerns. + build_hasher: &'map State, + + // The list of the keys in the map. This is ordered by time of insertion. + keys: &'map VecList, + + /// The iterator over the list of all values. This is ordered by time of insertion. + iter: VecListIter<'map, Key>, + + /// The internal mapping from key hashes to associated value indices. + map: &'map HashMap, MapEntry, DummyState>, + + /// The list of the values in the map. This is ordered by time of insertion. + values: *mut VecList>, +} + +impl KeyValuesMut<'_, Key, Value, State> { + /// Creates an iterator that yields mutable references to all key-value pairs of a multimap. 
+ #[must_use] + pub fn iter(&self) -> KeyValues<'_, Key, Value, State> { + KeyValues { + build_hasher: self.build_hasher, + keys: self.keys, + iter: self.iter.clone(), + map: self.map, + values: unsafe { &*self.values }, + } + } +} + +impl Debug for KeyValuesMut<'_, Key, Value, State> +where + Key: Debug + Eq + Hash, + State: BuildHasher, + Value: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("KeyValuesMut(")?; + formatter.debug_list().entries(self.iter()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for KeyValuesMut<'_, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ + fn next_back(&mut self) -> Option { + let key = self.iter.next_back()?; + let hash = hash_key(self.build_hasher, key); + let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap(); + let iter = EntryValuesMut::from_map_entry(unsafe { &mut *self.values }, map_entry); + Some((key, iter)) + } +} + +impl ExactSizeIterator for KeyValuesMut<'_, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ +} + +impl FusedIterator for KeyValuesMut<'_, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ +} + +impl<'map, Key, Value, State> Iterator for KeyValuesMut<'map, Key, Value, State> +where + Key: Eq + Hash, + State: BuildHasher, +{ + type Item = (&'map Key, EntryValuesMut<'map, Key, Value>); + + fn next(&mut self) -> Option { + let key = self.iter.next()?; + let hash = hash_key(self.build_hasher, key); + let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap(); + let iter = EntryValuesMut::from_map_entry(unsafe { &mut *self.values }, map_entry); + Some((key, iter)) + } + + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +unsafe impl Send for KeyValuesMut<'_, Key, Value> +where + Key: Send, + Value: Send, +{ +} + +unsafe impl Sync for KeyValuesMut<'_, Key, Value> +where + Key: Sync, + Value: Sync, +{ +} + +/// An iterator that yields immutable references to all keys in the multimap. The order of the keys is always in the +/// order that they were first inserted. +pub struct Keys<'map, Key>(VecListIter<'map, Key>); + +impl<'map, Key> Clone for Keys<'map, Key> { + fn clone(&self) -> Keys<'map, Key> { + Keys(self.0.clone()) + } +} + +impl Debug for Keys<'_, Key> +where + Key: Debug, +{ + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + formatter.write_str("Keys(")?; + formatter.debug_list().entries(self.clone()).finish()?; + formatter.write_str(")") + } +} + +impl DoubleEndedIterator for Keys<'_, Key> { + fn next_back(&mut self) -> Option { + self.0.next_back() + } +} + +impl ExactSizeIterator for Keys<'_, Key> {} + +impl FusedIterator for Keys<'_, Key> {} + +impl<'map, Key> Iterator for Keys<'map, Key> { + type Item = &'map Key; + + fn next(&mut self) -> Option { + self.0.next() + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +/// An iterator that yields immutable references to all values of a multimap. The order of the values is always in the +/// order that they were inserted. 
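+// The yielded order is global insertion order across all keys, not grouped per
+// key (sketch, assuming the usual `values` accessor for this iterator):
+//
+//     let mut map = ListOrderedMultimap::new();
+//     map.insert("key1", 1);
+//     map.insert("key2", 2);
+//     map.append("key1", 3);
+//     let values: Vec<&i32> = map.values().collect();
+//     assert_eq!(values, vec![&1, &2, &3]);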
+pub struct Values<'map, Key, Value>(VecListIter<'map, ValueEntry<Key, Value>>);
+
+impl<'map, Key, Value> Clone for Values<'map, Key, Value> {
+  fn clone(&self) -> Values<'map, Key, Value> {
+    Values(self.0.clone())
+  }
+}
+
+impl<Key, Value> Debug for Values<'_, Key, Value>
+where
+  Key: Debug,
+  Value: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("Values(")?;
+    formatter.debug_list().entries(self.clone()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<Key, Value> DoubleEndedIterator for Values<'_, Key, Value> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    self.0.next_back().map(|entry| &entry.value)
+  }
+}
+
+impl<Key, Value> ExactSizeIterator for Values<'_, Key, Value> {}
+
+impl<Key, Value> FusedIterator for Values<'_, Key, Value> {}
+
+impl<'map, Key, Value> Iterator for Values<'map, Key, Value> {
+  type Item = &'map Value;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    self.0.next().map(|entry| &entry.value)
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    self.0.size_hint()
+  }
+}
+
+/// An iterator that yields mutable references to all values of a multimap. The order of the values is always in the
+/// order that they were inserted.
+pub struct ValuesMut<'map, Key, Value>(VecListIterMut<'map, ValueEntry<Key, Value>>);
+
+impl<Key, Value> ValuesMut<'_, Key, Value> {
+  /// Creates an iterator that yields immutable references to all values of a multimap.
+  #[must_use]
+  pub fn iter(&self) -> Values<'_, Key, Value> {
+    Values(self.0.iter())
+  }
+}
+
+impl<Key, Value> Debug for ValuesMut<'_, Key, Value>
+where
+  Key: Debug,
+  Value: Debug,
+{
+  fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+    formatter.write_str("ValuesMut(")?;
+    formatter.debug_list().entries(self.iter()).finish()?;
+    formatter.write_str(")")
+  }
+}
+
+impl<Key, Value> DoubleEndedIterator for ValuesMut<'_, Key, Value> {
+  fn next_back(&mut self) -> Option<Self::Item> {
+    self.0.next_back().map(|entry| &mut entry.value)
+  }
+}
+
+impl<Key, Value> ExactSizeIterator for ValuesMut<'_, Key, Value> {}
+
+impl<Key, Value> FusedIterator for ValuesMut<'_, Key, Value> {}
+
+impl<'map, Key, Value> Iterator for ValuesMut<'map, Key, Value> {
+  type Item = &'map mut Value;
+
+  fn next(&mut self) -> Option<Self::Item> {
+    self.0.next().map(|entry| &mut entry.value)
+  }
+
+  fn size_hint(&self) -> (usize, Option<usize>) {
+    self.0.size_hint()
+  }
+}
+
+/// Dummy builder hasher that is not meant to be used. It is simply a placeholder.
+#[derive(Clone, Debug)]
+pub(crate) struct DummyState;
+
+impl BuildHasher for DummyState {
+  type Hasher = DummyHasher;
+
+  fn build_hasher(&self) -> Self::Hasher {
+    DummyHasher
+  }
+}
+
+/// Dummy hasher that is not meant to be used. It is simply a placeholder.
+#[derive(Clone, Debug)]
+pub struct DummyHasher;
+
+impl Hasher for DummyHasher {
+  fn finish(&self) -> u64 {
+    unimplemented!();
+  }
+
+  fn write(&mut self, _: &[u8]) {
+    unimplemented!();
+  }
+}
+
+/// Computes the hash value of the given key.
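+// Design note: the internal `HashMap` is keyed by `Index<Key>` into the `keys`
+// list, so every lookup computes the hash of the real key up front with
+// `hash_key` and resolves equality through the raw-entry helpers below;
+// `DummyHasher` panics if the map ever attempts to hash on its own.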
+#[must_use] +fn hash_key(state: &State, key: &KeyQuery) -> u64 +where + KeyQuery: ?Sized + Eq + Hash, + State: BuildHasher, +{ + let mut hasher = state.build_hasher(); + key.hash(&mut hasher); + hasher.finish() +} + +#[must_use] +fn raw_entry<'map, Key, KeyQuery, Value, State>( + keys: &VecList, + map: &'map HashMap, MapEntry, State>, + hash: u64, + key: &KeyQuery, +) -> Option<(&'map Index, &'map MapEntry)> +where + Key: Borrow + Eq + Hash, + KeyQuery: ?Sized + Eq + Hash, + State: BuildHasher, +{ + map.raw_entry().from_hash(hash, |&key_index| { + let existing_key = keys.get(key_index).unwrap(); + key == existing_key.borrow() + }) +} + +#[must_use] +fn raw_entry_mut<'map, Key, KeyQuery, Value, State>( + keys: &VecList, + map: &'map mut HashMap, MapEntry, State>, + hash: u64, + key: &KeyQuery, +) -> RawEntryMut<'map, Index, MapEntry, State> +where + Key: Borrow + Eq + Hash, + KeyQuery: ?Sized + Eq + Hash, + State: BuildHasher, +{ + map.raw_entry_mut().from_hash(hash, |&key_index| { + let existing_key = keys.get(key_index).unwrap(); + key == existing_key.borrow() + }) +} + +#[must_use] +fn raw_entry_mut_empty<'map, Key, KeyQuery, Value, State>( + keys: &VecList, + map: &'map mut HashMap, MapEntry, State>, + hash: u64, +) -> RawEntryMut<'map, Index, MapEntry, State> +where + Key: Borrow + Eq + Hash, + KeyQuery: ?Sized + Eq + Hash, + State: BuildHasher, +{ + map + .raw_entry_mut() + .from_hash(hash, |&key_index| keys.get(key_index).is_none()) +} + +#[allow(unused_results)] +#[cfg(all(test, feature = "std"))] +mod test { + use coverage_helper::test; + + use super::*; + + #[test] + fn test_bounds() { + fn check_bounds() {} + + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + check_bounds::>(); + } + + #[test] + fn test_collision() { + struct TestBuildHasher; + + impl BuildHasher for TestBuildHasher { + type Hasher = TestHasher; + + fn build_hasher(&self) -> Self::Hasher { + TestHasher + } + } + + struct TestHasher; + + impl Hasher for TestHasher { + fn finish(&self) -> u64 { + 0 + } + + fn write(&mut self, _: &[u8]) {} + } + + let mut map = ListOrderedMultimap::with_hasher(TestBuildHasher); + let state = map.hasher(); + + assert_eq!(hash_key(state, "key1"), hash_key(state, "key2")); + + map.insert("key1", "value1"); + assert_eq!(map.get(&"key1"), Some(&"value1")); + + map.insert("key2", "value2"); + assert_eq!(map.get(&"key2"), Some(&"value2")); + } + + #[test] + fn test_no_collision() { + let state = RandomState::new(); + let hash_1 = hash_key(&state, "key1"); + let hash_2 = hash_key(&state, "key2"); + + assert!(hash_1 != hash_2); + } + + #[test] + fn test_entry_and_modify() { + let mut map = ListOrderedMultimap::new(); + map + .entry("key") + .and_modify(|_| panic!("entry should be vacant")); + + map.insert("key", "value1"); + map.entry("key").and_modify(|value| *value = "value2"); + assert_eq!(map.get(&"key"), Some(&"value2")); + } + + #[test] + fn test_entry_or_insert() { + let mut map = ListOrderedMultimap::new(); + let value = map.entry("key").or_insert("value1"); + assert_eq!(value, &"value1"); + + let value = map.entry("key").or_insert("value2"); + assert_eq!(value, &"value1"); + } + + #[test] + fn test_entry_or_insert_entry() { + let mut map = ListOrderedMultimap::new(); + let entry = map.entry("key").or_insert_entry("value1"); + assert_eq!(entry.get(), &"value1"); + + let entry = map.entry("key").or_insert_entry("value2"); + 
assert_eq!(entry.get(), &"value1"); + } + + #[test] + fn test_entry_or_insert_with() { + let mut map = ListOrderedMultimap::new(); + let value = map.entry("key").or_insert_with(|| "value1"); + assert_eq!(value, &"value1"); + + let value = map.entry("key").or_insert_with(|| "value2"); + assert_eq!(value, &"value1"); + } + + #[test] + fn test_entry_or_insert_with_entry() { + let mut map = ListOrderedMultimap::new(); + let entry = map.entry("key").or_insert_with_entry(|| "value1"); + assert_eq!(entry.get(), &"value1"); + + let entry = map.entry("key").or_insert_with_entry(|| "value2"); + assert_eq!(entry.get(), &"value1"); + } + + #[test] + fn test_entry_debug() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let entry = map.entry("key"); + + assert_eq!(format!("{entry:?}"), r#"VacantEntry("key")"#); + } + + #[test] + fn test_entry_values_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let iter = map.get_all(&"key"); + assert_eq!( + format!("{iter:?}"), + r#"EntryValues(["value1", "value2", "value3", "value4"])"# + ); + } + + #[test] + fn test_entry_values_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let mut iter = map.get_all(&"key"); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next_back(), Some(&"value4")); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next_back(), Some(&"value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_drain_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let iter = map.remove_all(&"key"); + assert_eq!( + format!("{iter:?}"), + r#"EntryValuesDrain(["value1", "value2", "value3", "value4"])"# + ); + } + + #[test] + fn test_entry_values_drain_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.next(), Some("value1")); + assert_eq!(iter.next_back(), Some("value4")); + assert_eq!(iter.next(), Some("value2")); + assert_eq!(iter.next_back(), Some("value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_drain_empty() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_drain_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.next(), Some("value")); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_drain_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, 
Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_entry_values_empty() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.get_all(&"key"); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.get_all(&"key"); + assert_eq!(iter.next(), Some(&"value")); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_mut_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let iter = map.get_all_mut(&"key"); + assert_eq!( + format!("{iter:?}"), + r#"EntryValuesMut(["value1", "value2", "value3", "value4"])"# + ); + } + + #[test] + fn test_entry_values_mut_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let mut iter = map.get_all_mut(&"key"); + assert_eq!(iter.next(), Some(&mut "value1")); + assert_eq!(iter.next_back(), Some(&mut "value4")); + assert_eq!(iter.next(), Some(&mut "value2")); + assert_eq!(iter.next_back(), Some(&mut "value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_mut_empty() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.get_all_mut(&"key"); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_mut_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.get_all_mut(&"key"); + assert_eq!(iter.next(), Some(&mut "value")); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_entry_values_mut_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let mut iter = map.get_all_mut(&"key"); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_entry_values_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let mut iter = map.get_all(&"key"); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_iter_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", 
"value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.iter(); + assert_eq!( + format!("{iter:?}"), + r#"Iter([("key1", "value1"), ("key2", "value2"), ("key2", "value3"), ("key1", "value4")])"# + ); + } + + #[test] + fn test_iter_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.iter(); + assert_eq!(iter.next(), Some((&"key1", &"value1"))); + assert_eq!(iter.next_back(), Some((&"key1", &"value4"))); + assert_eq!(iter.next(), Some((&"key2", &"value2"))); + assert_eq!(iter.next_back(), Some((&"key2", &"value3"))); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_empty() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.iter(); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.iter(); + assert_eq!(iter.next(), Some((&"key", &"value"))); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_mut_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.iter_mut(); + assert_eq!( + format!("{iter:?}"), + r#"IterMut([("key1", "value1"), ("key2", "value2"), ("key2", "value3"), ("key1", "value4")])"# + ); + } + + #[test] + fn test_iter_mut_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.iter_mut(); + assert_eq!(iter.next(), Some((&"key1", &mut "value1"))); + assert_eq!(iter.next_back(), Some((&"key1", &mut "value4"))); + assert_eq!(iter.next(), Some((&"key2", &mut "value2"))); + assert_eq!(iter.next_back(), Some((&"key2", &mut "value3"))); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_mut_empty() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.iter_mut(); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_mut_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.iter_mut(); + assert_eq!(iter.next(), Some((&"key", &mut "value"))); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_iter_mut_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.iter_mut(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_iter_size_hint() { + let mut map = 
ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.iter(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_into_iter_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.into_iter(); + assert_eq!( + format!("{iter:?}"), + r#"IntoIter([("key1", "value1"), ("key2", "value2"), ("key2", "value3"), ("key1", "value4")])"# + ); + } + + #[test] + fn test_into_iter_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.into_iter(); + assert_eq!(iter.next(), Some(("key1", "value1"))); + assert_eq!(iter.next_back(), Some(("key1", "value4"))); + assert_eq!(iter.next(), Some(("key2", "value2"))); + assert_eq!(iter.next_back(), Some(("key2", "value3"))); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_into_iter_empty() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.into_iter(); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_into_iter_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.into_iter(); + assert_eq!(iter.next(), Some(("key", "value"))); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_into_iter_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.into_iter(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_key_values_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.pairs(); + assert_eq!( + format!("{iter:?}"), + r#"KeyValues([("key1", EntryValues(["value1", "value4"])), ("key2", EntryValues(["value2", "value3"]))])"# + ); + } + + #[test] + fn test_key_values_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.pairs(); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key1"); + assert_eq!(values.next(), Some(&"value1")); + assert_eq!(values.next(), Some(&"value4")); + assert_eq!(values.next(), None); + + let (key, mut values) = iter.next_back().unwrap(); + assert_eq!(key, &"key2"); + assert_eq!(values.next(), Some(&"value2")); + 
assert_eq!(values.next(), Some(&"value3")); + assert_eq!(values.next(), None); + + assert!(iter.next().is_none()); + assert!(iter.next_back().is_none()); + } + + #[test] + fn test_key_values_empty() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.pairs(); + assert!(iter.next_back().is_none()); + assert!(iter.next().is_none()); + } + + #[test] + fn test_key_values_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.pairs(); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key"); + assert_eq!(values.next(), Some(&"value")); + assert_eq!(values.next(), None); + + assert!(iter.next().is_none()); + assert!(iter.next_back().is_none()); + assert!(iter.next().is_none()); + assert!(iter.next_back().is_none()); + assert!(iter.next().is_none()); + } + + #[test] + fn test_key_values_mut_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.pairs_mut(); + assert_eq!( + format!("{iter:?}"), + r#"KeyValuesMut([("key1", EntryValues(["value1", "value4"])), ("key2", EntryValues(["value2", "value3"]))])"# + ); + } + + #[test] + fn test_key_values_mut_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.pairs_mut(); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key1"); + assert_eq!(values.next(), Some(&mut "value1")); + assert_eq!(values.next(), Some(&mut "value4")); + assert_eq!(values.next(), None); + + let (key, mut values) = iter.next_back().unwrap(); + assert_eq!(key, &"key2"); + assert_eq!(values.next(), Some(&mut "value2")); + assert_eq!(values.next(), Some(&mut "value3")); + assert_eq!(values.next(), None); + + assert!(iter.next().is_none()); + assert!(iter.next_back().is_none()); + } + + #[test] + fn test_key_values_mut_empty() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.pairs_mut(); + assert!(iter.next_back().is_none()); + assert!(iter.next().is_none()); + } + + #[test] + fn test_key_values_mut_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.pairs_mut(); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key"); + assert_eq!(values.next(), Some(&mut "value")); + assert_eq!(values.next(), None); + + assert!(iter.next().is_none()); + assert!(iter.next_back().is_none()); + assert!(iter.next().is_none()); + assert!(iter.next_back().is_none()); + assert!(iter.next().is_none()); + } + + #[test] + fn test_key_values_mut_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.pairs_mut(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_key_values_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.pairs(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + 
assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_keys_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.keys(); + assert_eq!(format!("{iter:?}"), r#"Keys(["key1", "key2"])"#); + } + + #[test] + fn test_keys_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.keys(); + assert_eq!(iter.next(), Some(&"key1")); + assert_eq!(iter.next_back(), Some(&"key2")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_keys_empty() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.keys(); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_keys_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.keys(); + assert_eq!(iter.next(), Some(&"key")); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_keys_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.keys(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_list_ordered_multimap_append() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.entry_len(&"key"), 0); + + let already_exists = map.append("key", "value1"); + assert!(!already_exists); + assert_eq!(map.entry_len(&"key"), 1); + + let already_exists = map.append("key", "value2"); + assert!(already_exists); + assert_eq!(map.entry_len(&"key"), 2); + + let mut iter = map.get_all(&"key"); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_back() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.back(), None); + + map.insert("key1", "value1"); + assert_eq!(map.back(), Some((&"key1", &"value1"))); + + map.append("key2", "value2"); + assert_eq!(map.back(), Some((&"key2", &"value2"))); + + map.remove(&"key2"); + assert_eq!(map.back(), Some((&"key1", &"value1"))); + + map.remove(&"key1"); + assert_eq!(map.back(), None); + } + + #[test] + fn test_list_ordered_multimap_back_mut() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.back(), None); + + map.insert("key1", "value1"); + assert_eq!(map.back(), Some((&"key1", &"value1"))); + + map.append("key2", "value2"); + assert_eq!(map.back(), Some((&"key2", &"value2"))); + + map.remove(&"key2"); + assert_eq!(map.back(), Some((&"key1", &"value1"))); + + map.remove(&"key1"); + assert_eq!(map.back(), None); + } + + #[test] + fn test_list_ordered_multimap_clear() { + let mut map = ListOrderedMultimap::new(); + map.insert("key", "value"); + map.insert("key2", "value"); + + map.clear(); + + assert!(map.is_empty()); + assert_eq!(map.get(&"key"), None); + assert_eq!(map.get(&"key2"), None); + } + + #[test] + fn 
test_list_ordered_multimap_contains_key() { + let mut map = ListOrderedMultimap::new(); + assert!(!map.contains_key(&"key")); + + map.insert("key", "value"); + assert!(map.contains_key(&"key")); + } + + #[test] + fn test_list_ordered_multimap_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + assert_eq!( + format!("{map:?}"), + r#"{"key1": "value1", "key2": "value2", "key2": "value3", "key1": "value4"}"# + ); + } + + #[test] + fn test_list_ordered_multimap_entry() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.get(&"key1"), None); + + let value = map.entry("key").or_insert("value1"); + assert_eq!(value, &"value1"); + assert_eq!(map.get(&"key"), Some(&"value1")); + + let value = map.entry("key").or_insert("value2"); + assert_eq!(value, &"value1"); + assert_eq!(map.get(&"key"), Some(&"value1")); + } + + #[test] + fn test_list_ordered_multimap_entry_len() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.entry_len(&"key1"), 0); + + map.insert("key", "value"); + assert_eq!(map.entry_len(&"key"), 1); + + map.insert("key", "value"); + assert_eq!(map.entry_len(&"key"), 1); + + map.append("key", "value"); + assert_eq!(map.entry_len(&"key"), 2); + + map.insert("key", "value"); + assert_eq!(map.entry_len(&"key"), 1); + + map.remove(&"key"); + assert_eq!(map.entry_len(&"key"), 0); + } + + #[test] + fn test_list_ordered_multimap_equality() { + let mut map_1 = ListOrderedMultimap::new(); + + map_1.insert("key1", "value1"); + map_1.insert("key2", "value2"); + map_1.append("key2", "value3"); + map_1.append("key1", "value4"); + + let mut map_2 = map_1.clone(); + map_2.pop_back(); + + assert_ne!(map_1, map_2); + + map_2.append("key1", "value4"); + assert_eq!(map_1, map_2); + } + + #[test] + fn test_list_ordered_multimap_extend() { + let mut map = ListOrderedMultimap::new(); + map.extend(vec![("key1", "value1"), ("key2", "value2"), ("key2", "value3")].into_iter()); + + let mut iter = map.get_all(&"key1"); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next(), None); + + let mut iter = map.get_all(&"key2"); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next(), Some(&"value3")); + assert_eq!(iter.next(), None); + + let mut map = ListOrderedMultimap::new(); + map.extend(vec![(&1, &1), (&2, &1), (&2, &2)].into_iter()); + + let mut iter = map.get_all(&1); + assert_eq!(iter.next(), Some(&1)); + assert_eq!(iter.next(), None); + + let mut iter = map.get_all(&2); + assert_eq!(iter.next(), Some(&1)); + assert_eq!(iter.next(), Some(&2)); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_from_iterator() { + let map: ListOrderedMultimap<_, _, RandomState> = ListOrderedMultimap::from_iter( + vec![("key1", "value1"), ("key2", "value2"), ("key2", "value3")].into_iter(), + ); + + let mut iter = map.get_all(&"key1"); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next(), None); + + let mut iter = map.get_all(&"key2"); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next(), Some(&"value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_get() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.get(&"key"), None); + + map.insert("key", "value"); + assert_eq!(map.get(&"key"), Some(&"value")); + } + + #[test] + fn test_list_ordered_multimap_get_all() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = 
map.get_all(&"key"); + assert_eq!(iter.next(), None); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + + let mut iter = map.get_all(&"key"); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next(), Some(&"value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_get_all_mut() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = map.get_all(&"key"); + assert_eq!(iter.next(), None); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + + let mut iter = map.get_all_mut(&"key"); + assert_eq!(iter.next(), Some(&mut "value1")); + assert_eq!(iter.next(), Some(&mut "value2")); + assert_eq!(iter.next(), Some(&mut "value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_get_mut() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.get_mut(&"key"), None); + + map.insert("key", "value"); + assert_eq!(map.get_mut(&"key"), Some(&mut "value")); + } + + #[test] + fn test_list_ordered_multimap_insert() { + let mut map = ListOrderedMultimap::new(); + assert!(!map.contains_key(&"key")); + assert_eq!(map.get(&"key"), None); + + let value = map.insert("key", "value1"); + assert_eq!(value, None); + assert!(map.contains_key(&"key")); + assert_eq!(map.get(&"key"), Some(&"value1")); + + let value = map.insert("key", "value2"); + assert_eq!(value, Some("value1")); + assert!(map.contains_key(&"key")); + assert_eq!(map.get(&"key"), Some(&"value2")); + } + + #[test] + fn test_list_ordered_multimap_insert_all() { + let mut map = ListOrderedMultimap::new(); + assert!(!map.contains_key(&"key")); + assert_eq!(map.get(&"key"), None); + + { + let mut iter = map.insert_all("key", "value1"); + assert_eq!(iter.next(), None); + } + + assert!(map.contains_key(&"key")); + assert_eq!(map.get(&"key"), Some(&"value1")); + + { + let mut iter = map.insert_all("key", "value2"); + assert_eq!(iter.next(), Some("value1")); + assert_eq!(iter.next(), None); + } + + assert!(map.contains_key(&"key")); + assert_eq!(map.get(&"key"), Some(&"value2")); + } + + #[test] + fn test_list_ordered_multimap_is_empty() { + let mut map = ListOrderedMultimap::new(); + assert!(map.is_empty()); + + map.insert("key", "value"); + assert!(!map.is_empty()); + + map.remove(&"key"); + assert!(map.is_empty()); + } + + #[test] + fn test_list_ordered_multimap_iter() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = map.iter(); + assert_eq!(iter.next(), None); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.iter(); + assert_eq!(iter.next(), Some((&"key1", &"value1"))); + assert_eq!(iter.next(), Some((&"key2", &"value2"))); + assert_eq!(iter.next(), Some((&"key2", &"value3"))); + assert_eq!(iter.next(), Some((&"key1", &"value4"))); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_iter_mut() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = map.iter_mut(); + assert_eq!(iter.next(), None); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.iter_mut(); + assert_eq!(iter.next(), Some((&"key1", &mut "value1"))); + assert_eq!(iter.next(), Some((&"key2", &mut "value2"))); + assert_eq!(iter.next(), Some((&"key2", &mut "value3"))); + assert_eq!(iter.next(), Some((&"key1", &mut 
"value4"))); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_keys() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = map.keys(); + assert_eq!(iter.next(), None); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.insert("key1", "value3"); + map.insert("key3", "value4"); + + let mut iter = map.keys(); + assert_eq!(iter.next(), Some(&"key1")); + assert_eq!(iter.next(), Some(&"key2")); + assert_eq!(iter.next(), Some(&"key3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_keys_capacity() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.keys_capacity(), 0); + map.insert("key", "value"); + assert!(map.keys_capacity() > 0); + } + + #[test] + fn test_list_ordered_multimap_keys_len() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.keys_len(), 0); + + map.insert("key1", "value1"); + assert_eq!(map.keys_len(), 1); + + map.insert("key2", "value2"); + assert_eq!(map.keys_len(), 2); + + map.append("key1", "value3"); + assert_eq!(map.keys_len(), 2); + + map.remove(&"key1"); + assert_eq!(map.keys_len(), 1); + + map.remove(&"key2"); + assert_eq!(map.keys_len(), 0); + } + + #[test] + fn test_list_ordered_multimap_new() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + assert_eq!(map.keys_capacity(), 0); + assert_eq!(map.keys_len(), 0); + assert_eq!(map.values_capacity(), 0); + assert_eq!(map.values_len(), 0); + } + + #[test] + fn test_list_ordered_multimap_pack_to() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::with_capacity(5, 5); + map.pack_to_fit(); + assert_eq!(map.keys_capacity(), 0); + assert_eq!(map.values_capacity(), 0); + + let mut map = ListOrderedMultimap::with_capacity(10, 10); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + map.pack_to(5, 5); + + assert_eq!(map.get(&"key1"), Some(&"value1")); + assert_eq!(map.get(&"key2"), Some(&"value2")); + + assert_eq!(map.keys_capacity(), 5); + assert_eq!(map.keys_len(), 2); + assert_eq!(map.values_capacity(), 5); + assert_eq!(map.values_len(), 4); + + let mut iter = map.iter(); + assert_eq!(iter.next(), Some((&"key1", &"value1"))); + assert_eq!(iter.next(), Some((&"key2", &"value2"))); + assert_eq!(iter.next(), Some((&"key2", &"value3"))); + assert_eq!(iter.next(), Some((&"key1", &"value4"))); + assert_eq!(iter.next(), None); + } + + #[should_panic] + #[test] + fn test_list_ordered_multimap_pack_to_panic_key_capacity() { + let mut map = ListOrderedMultimap::new(); + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + map.pack_to(1, 5); + } + + #[should_panic] + #[test] + fn test_list_ordered_multimap_pack_to_panic_value_capacity() { + let mut map = ListOrderedMultimap::new(); + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + map.pack_to(5, 1); + } + + #[test] + fn test_list_ordered_multimap_pack_to_fit() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::with_capacity(5, 5); + map.pack_to_fit(); + assert_eq!(map.keys_capacity(), 0); + assert_eq!(map.values_capacity(), 0); + + let mut map = ListOrderedMultimap::with_capacity(5, 5); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + map.pack_to_fit(); + assert_eq!(map.keys_capacity(), 
2); + assert_eq!(map.keys_len(), 2); + assert_eq!(map.values_capacity(), 4); + assert_eq!(map.values_len(), 4); + + let mut iter = map.iter(); + assert_eq!(iter.next(), Some((&"key1", &"value1"))); + assert_eq!(iter.next(), Some((&"key2", &"value2"))); + assert_eq!(iter.next(), Some((&"key2", &"value3"))); + assert_eq!(iter.next(), Some((&"key1", &"value4"))); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_pairs() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.pairs(); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key1"); + assert_eq!(values.next(), Some(&"value1")); + assert_eq!(values.next(), Some(&"value4")); + assert_eq!(values.next(), None); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key2"); + assert_eq!(values.next(), Some(&"value2")); + assert_eq!(values.next(), Some(&"value3")); + assert_eq!(values.next(), None); + + assert!(iter.next().is_none()); + } + + #[test] + fn test_list_ordered_multimap_pairs_mut() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.pairs_mut(); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key1"); + assert_eq!(values.next(), Some(&mut "value1")); + assert_eq!(values.next(), Some(&mut "value4")); + assert_eq!(values.next(), None); + + let (key, mut values) = iter.next().unwrap(); + assert_eq!(key, &"key2"); + assert_eq!(values.next(), Some(&mut "value2")); + assert_eq!(values.next(), Some(&mut "value3")); + assert_eq!(values.next(), None); + + assert!(iter.next().is_none()); + } + + #[test] + fn test_list_ordered_multimap_pop_back() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let (key, value) = map.pop_back().unwrap(); + assert_eq!(key, KeyWrapper::Borrowed(&"key1")); + assert_eq!(&value, &"value4"); + assert_eq!(map.keys_len(), 2); + assert_eq!(map.values_len(), 3); + + let (key, value) = map.pop_back().unwrap(); + assert_eq!(key, KeyWrapper::Borrowed(&"key2")); + assert_eq!(&value, &"value3"); + assert_eq!(map.keys_len(), 2); + assert_eq!(map.values_len(), 2); + + let (key, value) = map.pop_back().unwrap(); + assert_eq!(key, KeyWrapper::Owned("key2")); + assert_eq!(&value, &"value2"); + assert_eq!(map.keys_len(), 1); + assert_eq!(map.values_len(), 1); + + let (key, value) = map.pop_back().unwrap(); + assert_eq!(key, KeyWrapper::Owned("key1")); + assert_eq!(&value, &"value1"); + assert_eq!(map.keys_len(), 0); + assert_eq!(map.values_len(), 0); + + assert!(map.pop_back().is_none()); + } + + #[test] + fn test_list_ordered_multimap_pop_front() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let (key, value) = map.pop_front().unwrap(); + assert_eq!(key, KeyWrapper::Borrowed(&"key1")); + assert_eq!(&value, &"value1"); + assert_eq!(map.keys_len(), 2); + assert_eq!(map.values_len(), 3); + + let (key, value) = map.pop_front().unwrap(); + assert_eq!(key, KeyWrapper::Borrowed(&"key2")); + assert_eq!(&value, &"value2"); + assert_eq!(map.keys_len(), 2); + assert_eq!(map.values_len(), 2); + + let (key, 
value) = map.pop_front().unwrap(); + assert_eq!(key, KeyWrapper::Owned("key2")); + assert_eq!(&value, &"value3"); + assert_eq!(map.keys_len(), 1); + assert_eq!(map.values_len(), 1); + + let (key, value) = map.pop_front().unwrap(); + assert_eq!(key, KeyWrapper::Owned("key1")); + assert_eq!(&value, &"value4"); + assert_eq!(map.keys_len(), 0); + assert_eq!(map.values_len(), 0); + + assert!(map.pop_front().is_none()); + } + + #[test] + fn test_list_ordered_multimap_remove() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.remove(&"key"), None); + + map.insert("key", "value1"); + map.append("key", "value2"); + assert_eq!(map.remove(&"key"), Some("value1")); + assert_eq!(map.remove(&"key"), None); + } + + #[test] + fn test_list_ordered_multimap_remove_all() { + let mut map = ListOrderedMultimap::new(); + + { + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.next(), None); + } + + map.insert("key", "value1"); + map.append("key", "value2"); + + { + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.next(), Some("value1")); + assert_eq!(iter.next(), Some("value2")); + assert_eq!(iter.next(), None); + } + + let mut iter = map.remove_all(&"key"); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_remove_entry() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.remove_entry(&"key"), None); + + map.insert("key", "value1"); + map.append("key", "value2"); + assert_eq!(map.remove_entry(&"key"), Some(("key", "value1"))); + assert_eq!(map.remove_entry(&"key"), None); + } + + #[test] + fn test_list_ordered_multimap_remove_entry_all() { + let mut map = ListOrderedMultimap::new(); + + { + let entry = map.remove_entry_all(&"key"); + assert!(entry.is_none()); + } + + map.insert("key", "value1"); + map.append("key", "value2"); + + { + let (key, mut iter) = map.remove_entry_all(&"key").unwrap(); + assert_eq!(key, "key"); + assert_eq!(iter.next(), Some("value1")); + assert_eq!(iter.next(), Some("value2")); + assert_eq!(iter.next(), None); + } + + let entry = map.remove_entry_all(&"key"); + assert!(entry.is_none()); + } + + #[test] + fn test_list_ordered_multimap_reserve_keys() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + assert_eq!(map.keys_capacity(), 0); + + map.reserve_keys(5); + assert!(map.keys_capacity() >= 5); + + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::with_capacity(5, 5); + assert_eq!(map.keys_capacity(), 5); + + map.reserve_keys(2); + assert_eq!(map.keys_capacity(), 5); + } + + #[test] + fn test_list_ordered_multimap_reserve_values() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + assert_eq!(map.values_capacity(), 0); + + map.reserve_values(5); + assert!(map.values_capacity() >= 5); + + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::with_capacity(5, 5); + assert_eq!(map.values_capacity(), 5); + + map.reserve_values(2); + assert_eq!(map.values_capacity(), 5); + } + + #[test] + fn test_list_ordered_multimap_retain() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", 1); + map.insert("key2", 5); + map.append("key1", -1); + map.insert("key3", -10); + map.insert("key4", 1); + map.append("key4", -1); + map.append("key4", 1); + + map.retain(|_, &mut value| value >= 0); + + let mut iter = map.iter(); + assert_eq!(iter.next(), Some((&"key1", &1))); + assert_eq!(iter.next(), Some((&"key2", &5))); + assert_eq!(iter.next(), Some((&"key4", &1))); + assert_eq!(iter.next(), Some((&"key4", &1))); + 
assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_values() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = map.iter(); + assert_eq!(iter.next(), None); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.values(); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next(), Some(&"value3")); + assert_eq!(iter.next(), Some(&"value4")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_values_mut() { + let mut map = ListOrderedMultimap::new(); + + let mut iter = map.iter(); + assert_eq!(iter.next(), None); + + map.insert("key1", "value1"); + map.insert("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.values_mut(); + assert_eq!(iter.next(), Some(&mut "value1")); + assert_eq!(iter.next(), Some(&mut "value2")); + assert_eq!(iter.next(), Some(&mut "value3")); + assert_eq!(iter.next(), Some(&mut "value4")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_list_ordered_multimap_values_capacity() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.values_capacity(), 0); + map.insert("key", "value"); + assert!(map.values_capacity() > 0); + } + + #[test] + fn test_list_ordered_multimap_values_len() { + let mut map = ListOrderedMultimap::new(); + assert_eq!(map.values_len(), 0); + + map.insert("key1", "value1"); + assert_eq!(map.values_len(), 1); + + map.insert("key2", "value2"); + assert_eq!(map.values_len(), 2); + + map.append("key1", "value3"); + assert_eq!(map.values_len(), 3); + + map.remove(&"key1"); + assert_eq!(map.values_len(), 1); + + map.remove(&"key2"); + assert_eq!(map.values_len(), 0); + } + + #[test] + fn test_list_ordered_multimap_with_capacity() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::with_capacity(1, 2); + assert!(map.keys_capacity() >= 1); + assert_eq!(map.keys_len(), 0); + assert!(map.values_capacity() >= 2); + assert_eq!(map.values_len(), 0); + } + + #[test] + fn test_list_ordered_multimap_with_capacity_and_hasher() { + let state = RandomState::new(); + let map: ListOrderedMultimap<&str, &str> = + ListOrderedMultimap::with_capacity_and_hasher(1, 2, state); + assert!(map.keys_capacity() >= 1); + assert_eq!(map.keys_len(), 0); + assert!(map.values_capacity() >= 2); + assert_eq!(map.values_len(), 0); + } + + #[test] + fn test_occupied_entry_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value1"); + map.append("key", "value2"); + map.append("key", "value3"); + map.append("key", "value4"); + + let entry = match map.entry("key") { + Entry::Occupied(entry) => entry, + _ => panic!("expected occupied entry"), + }; + + assert_eq!( + format!("{entry:?}"), + "OccupiedEntry { \ + key: \"key\", \ + values: EntryValues([\"value1\", \"value2\", \"value3\", \"value4\"]) \ + }" + ); + } + + #[test] + fn test_vacant_entry_debug() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let entry = match map.entry("key") { + Entry::Vacant(entry) => entry, + _ => panic!("expected vacant entry"), + }; + + assert_eq!(format!("{entry:?}"), r#"VacantEntry("key")"#); + } + + #[test] + fn test_values_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.values(); + assert_eq!( 
+ format!("{iter:?}"), + r#"Values(["value1", "value2", "value3", "value4"])"# + ); + } + + #[test] + fn test_values_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.values(); + assert_eq!(iter.next(), Some(&"value1")); + assert_eq!(iter.next_back(), Some(&"value4")); + assert_eq!(iter.next(), Some(&"value2")); + assert_eq!(iter.next_back(), Some(&"value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_values_empty() { + let map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.values(); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_values_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.values(); + assert_eq!(iter.next(), Some(&"value")); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_values_mut_debug() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let iter = map.values_mut(); + assert_eq!( + format!("{iter:?}"), + r#"ValuesMut(["value1", "value2", "value3", "value4"])"# + ); + } + + #[test] + fn test_values_mut_double_ended() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.values_mut(); + assert_eq!(iter.next(), Some(&mut "value1")); + assert_eq!(iter.next_back(), Some(&mut "value4")); + assert_eq!(iter.next(), Some(&mut "value2")); + assert_eq!(iter.next_back(), Some(&mut "value3")); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_values_mut_empty() { + let mut map: ListOrderedMultimap<&str, &str> = ListOrderedMultimap::new(); + let mut iter = map.values_mut(); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_values_mut_fused() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key", "value"); + + let mut iter = map.values_mut(); + assert_eq!(iter.next(), Some(&mut "value")); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(iter.next(), None); + } + + #[test] + fn test_values_mut_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.values_mut(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[test] + fn test_values_size_hint() { + let mut map = ListOrderedMultimap::new(); + + map.insert("key1", "value1"); + map.append("key2", "value2"); + map.append("key2", "value3"); + map.append("key1", "value4"); + + let mut iter = map.values(); + assert_eq!(iter.size_hint(), (4, Some(4))); + iter.next(); + assert_eq!(iter.size_hint(), (3, Some(3))); + iter.next(); + 
assert_eq!(iter.size_hint(), (2, Some(2))); + iter.next(); + assert_eq!(iter.size_hint(), (1, Some(1))); + iter.next(); + assert_eq!(iter.size_hint(), (0, Some(0))); + } + + #[should_panic] + #[test] + fn test_dummy_hasher_finish() { + let hasher = DummyHasher; + hasher.finish(); + } + + #[should_panic] + #[test] + fn test_dummy_hasher_write() { + let mut hasher = DummyHasher; + hasher.write(&[]); + } +} diff --git a/vendor/ordered-multimap/src/serde.rs b/vendor/ordered-multimap/src/serde.rs new file mode 100644 index 0000000..994cc53 --- /dev/null +++ b/vendor/ordered-multimap/src/serde.rs @@ -0,0 +1,135 @@ +use core::{ + fmt::{self, Formatter}, + hash::{BuildHasher, Hash}, + marker::PhantomData, +}; + +use serde::{ + de::{Deserialize, Deserializer, SeqAccess, Visitor}, + ser::{Serialize, SerializeSeq, Serializer}, +}; + +use crate::ListOrderedMultimap; + +impl<K, V, S> Serialize for ListOrderedMultimap<K, V, S> +where + K: Clone + Eq + Hash + Serialize, + V: Serialize, + S: BuildHasher, +{ + fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error> + where + T: Serializer, + { + let mut seq = serializer.serialize_seq(Some(self.values_len()))?; + + for (key, value) in self.into_iter() { + seq.serialize_element(&(key, value))?; + } + + seq.end() + } +} + +struct ListOrderedMultimapVisitor<K, V, S>(PhantomData<(K, V, S)>); + +impl<'de, K, V, S> Visitor<'de> for ListOrderedMultimapVisitor<K, V, S> +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Default, +{ + type Value = ListOrderedMultimap<K, V, S>; + + fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "a sequence") + } + + fn visit_seq<A>(self, mut access: A) -> Result<Self::Value, A::Error> + where + A: SeqAccess<'de>, + { + let mut map = ListOrderedMultimap::with_capacity_and_hasher( + access.size_hint().unwrap_or_default(), + access.size_hint().unwrap_or_default(), + S::default(), + ); + + while let Some((key, value)) = access.next_element()?
{ + let _ = map.append(key, value); + } + + Ok(map) + } +} + +impl<'de, K, V, S> Deserialize<'de> for ListOrderedMultimap<K, V, S> +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Default, +{ + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(ListOrderedMultimapVisitor(PhantomData)) + } +} + +#[allow(unused_results)] +#[cfg(all(test, feature = "std"))] +mod test { + use coverage_helper::test; + use serde_test::{assert_de_tokens_error, assert_tokens, Token}; + + use super::*; + + #[test] + fn test_de_error() { + assert_de_tokens_error::<ListOrderedMultimap<char, i32>>( + &[Token::Map { len: Some(0) }], + "invalid type: map, expected a sequence", + ); + } + + #[test] + fn test_ser_de_empty() { + let map = ListOrderedMultimap::<char, i32>::new(); + + assert_tokens(&map, &[Token::Seq { len: Some(0) }, Token::SeqEnd]); + } + + #[test] + fn test_ser_de() { + let mut map = ListOrderedMultimap::new(); + map.append('b', 20); + map.append('a', 10); + map.append('c', 30); + map.append('b', 30); + + assert_tokens( + &map, + &[ + Token::Seq { len: Some(4) }, + Token::Tuple { len: 2 }, + Token::Char('b'), + Token::I32(20), + Token::TupleEnd, + Token::Tuple { len: 2 }, + Token::Char('a'), + Token::I32(10), + Token::TupleEnd, + Token::Tuple { len: 2 }, + Token::Char('c'), + Token::I32(30), + Token::TupleEnd, + Token::Tuple { len: 2 }, + Token::Char('b'), + Token::I32(30), + Token::TupleEnd, + Token::SeqEnd, + ], + ); + } +}
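The serde support above serializes the multimap as a flat sequence of `(key, value)` pairs and rebuilds it with `append`, so duplicate keys and their insertion order survive a round trip. A minimal sketch of that behavior, assuming the crate's `serde` feature is enabled and `serde_json` is available as the format crate (neither is wired up by this patch):

```rust
use ordered_multimap::ListOrderedMultimap;

fn main() {
    let mut map = ListOrderedMultimap::new();
    map.append("key", 1);
    map.append("key", 2);

    // Serialized as a sequence of (key, value) pairs, in insertion order.
    let json = serde_json::to_string(&map).unwrap();
    assert_eq!(json, r#"[["key",1],["key",2]]"#);

    // Deserialization re-appends every pair, so both values come back
    // under the same key, still ordered.
    let back: ListOrderedMultimap<String, i32> = serde_json::from_str(&json).unwrap();
    assert_eq!(back.get_all("key").copied().collect::<Vec<_>>(), vec![1, 2]);
}
```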
Chung "] +description = "An Ini configuration file parsing library in Rust" +documentation = "https://docs.rs/rust-ini/" +keywords = [ + "ini", + "configuration", + "conf", + "cfg", +] +license = "MIT" +repository = "https://github.com/zonyitoo/rust-ini" + +[lib] +name = "ini" +test = true + +[dependencies.cfg-if] +version = "1.0" + +[dependencies.ordered-multimap] +version = "0.6" + +[dependencies.unicase] +version = "2.6" +optional = true + +[features] +brackets-in-section-names = [] +case-insensitive = ["unicase"] +default = [] +inline-comment = [] diff --git a/vendor/rust-ini/LICENSE b/vendor/rust-ini/LICENSE new file mode 100644 index 0000000..49be0b0 --- /dev/null +++ b/vendor/rust-ini/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2014 Y. T. CHUNG + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/rust-ini/README.rst b/vendor/rust-ini/README.rst new file mode 100644 index 0000000..1881853 --- /dev/null +++ b/vendor/rust-ini/README.rst @@ -0,0 +1,114 @@ +INI in Rust +----------- + +.. image:: https://github.com/zonyitoo/rust-ini/actions/workflows/build-and-test.yml/badge.svg + :target: https://github.com/zonyitoo/rust-ini/actions/workflows/build-and-test.yml + +.. image:: https://img.shields.io/crates/v/rust-ini.svg + :target: https://crates.io/crates/rust-ini + +.. image:: https://docs.rs/rust-ini/badge.svg + :target: https://docs.rs/rust-ini + +INI_ is an informal standard for configuration files for some platforms or software. INI files are simple text files with a basic structure composed of "sections" and "properties". + +.. _INI: http://en.wikipedia.org/wiki/INI_file + +This is an INI file parser in Rust_. + +.. _Rust: http://www.rust-lang.org/ + +.. code:: toml + + [dependencies] + rust-ini = "0.19" + +Usage +===== + +* Create a Ini configuration file. + +.. code:: rust + + extern crate ini; + use ini::Ini; + + fn main() { + let mut conf = Ini::new(); + conf.with_section(None::) + .set("encoding", "utf-8"); + conf.with_section(Some("User")) + .set("given_name", "Tommy") + .set("family_name", "Green") + .set("unicode", "Raspberry树莓"); + conf.with_section(Some("Book")) + .set("name", "Rust cool"); + conf.write_to_file("conf.ini").unwrap(); + } + +Then you will get ``conf.ini`` + +.. code:: ini + + encoding=utf-8 + + [User] + given_name=Tommy + family_name=Green + unicode=Raspberry\x6811\x8393 + + [Book] + name=Rust cool + +* Read from file ``conf.ini`` + +.. 
code:: rust + + extern crate ini; + use ini::Ini; + + fn main() { + let conf = Ini::load_from_file("conf.ini").unwrap(); + + let section = conf.section(Some("User")).unwrap(); + let tommy = section.get("given_name").unwrap(); + let green = section.get("family_name").unwrap(); + + println!("{:?} {:?}", tommy, green); + + // iterating + for (sec, prop) in &conf { + println!("Section: {:?}", sec); + for (key, value) in prop.iter() { + println!("{:?}:{:?}", key, value); + } + } + } + +* More details could be found in `examples`. + +License +======= + +`The MIT License (MIT)`_ + +.. _The MIT License (MIT): https://opensource.org/licenses/MIT + +Copyright (c) 2014 Y. T. CHUNG <zonyitoo@gmail.com> + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/rust-ini/examples/test.rs b/vendor/rust-ini/examples/test.rs new file mode 100644 index 0000000..315efba --- /dev/null +++ b/vendor/rust-ini/examples/test.rs @@ -0,0 +1,44 @@ +use std::io::stdout; + +use ini::Ini; + +const CONF_FILE_NAME: &str = "test.ini"; + +fn main() { + let mut conf = Ini::new(); + conf.with_section(None::<String>).set("encoding", "utf-8"); + conf.with_section(Some("User")) + .set("name", "Raspberry树莓") + .set("value", "Pi"); + conf.with_section(Some("Library")) + .set("name", "Sun Yat-sen U") + .set("location", "Guangzhou=world\x0ahahaha"); + + conf.section_mut(Some("Library")).unwrap().insert("seats", "42"); + + println!("---------------------------------------"); + println!("Writing to file {:?}\n", CONF_FILE_NAME); + conf.write_to(&mut stdout()).unwrap(); + + conf.write_to_file(CONF_FILE_NAME).unwrap(); + + println!("----------------------------------------"); + println!("Reading from file {:?}", CONF_FILE_NAME); + let i = Ini::load_from_file(CONF_FILE_NAME).unwrap(); + + println!("Iterating"); + let general_section_name = "__General__"; + for (sec, prop) in i.iter() { + let section_name = sec.as_ref().unwrap_or(&general_section_name); + println!("-- Section: {:?} begins", section_name); + for (k, v) in prop.iter() { + println!("{}: {:?}", k, v); + } + } + println!(); + + let section = i.section(Some("User")).unwrap(); + println!("name={}", section.get("name").unwrap()); + println!("conf[User][name]={}", &i["User"]["name"]); + println!("General Section: {:?}", i.general_section()); +} diff --git a/vendor/rust-ini/rustfmt.toml b/vendor/rust-ini/rustfmt.toml new file mode 100644 index 0000000..762f11c --- /dev/null +++ b/vendor/rust-ini/rustfmt.toml @@ -0,0 +1,12 @@ +max_width = 120 +indent_style = "Visual" +#fn_call_width = 120 +reorder_imports = true 
+reorder_imports_in_group = true +reorder_imported_names = true +condense_wildcard_suffixes = true +#fn_args_layout = "Visual" +#fn_call_style = "Visual" +#chain_indent = "Visual" +normalize_comments = true +use_try_shorthand = true diff --git a/vendor/rust-ini/src/lib.rs b/vendor/rust-ini/src/lib.rs new file mode 100644 index 0000000..ab745d6 --- /dev/null +++ b/vendor/rust-ini/src/lib.rs @@ -0,0 +1,2407 @@ +// The MIT License (MIT) + +// Copyright (c) 2014 Y. T. CHUNG + +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +//! Ini parser for Rust +//! +//! ```no_run +//! use ini::Ini; +//! +//! let mut conf = Ini::new(); +//! conf.with_section(Some("User")) +//! .set("name", "Raspberry树莓") +//! .set("value", "Pi"); +//! conf.with_section(Some("Library")) +//! .set("name", "Sun Yat-sen U") +//! .set("location", "Guangzhou=world"); +//! conf.write_to_file("conf.ini").unwrap(); +//! +//! let i = Ini::load_from_file("conf.ini").unwrap(); +//! for (sec, prop) in i.iter() { +//! println!("Section: {:?}", sec); +//! for (k, v) in prop.iter() { +//! println!("{}:{}", k, v); +//! } +//! } +//! ``` + +use std::char; +use std::error; +use std::fmt::{self, Display}; +use std::fs::{File, OpenOptions}; +use std::io::{self, Read, Write}; +use std::io::{Seek, SeekFrom}; +use std::ops::{Index, IndexMut}; +use std::path::Path; +use std::str::Chars; + +use cfg_if::cfg_if; +use ordered_multimap::list_ordered_multimap::{Entry, Iter, IterMut, OccupiedEntry, VacantEntry}; +use ordered_multimap::ListOrderedMultimap; +#[cfg(feature = "case-insensitive")] +use unicase::UniCase; + +/// Policies for escaping logic +#[derive(Debug, PartialEq, Copy, Clone)] +pub enum EscapePolicy { + /// Escape absolutely nothing (dangerous) + Nothing, + /// Only escape the most necessary things. + /// This means backslashes, control characters (codepoints U+0000 to U+001F), and delete (U+007F). + /// Quotes (single or double) are not escaped. + Basics, + /// Escape basics and non-ASCII characters in the [Basic Multilingual Plane](https://www.compart.com/en/unicode/plane) + /// (i.e. between U+007F - U+FFFF) + /// Codepoints above U+FFFF, e.g. '🐱' U+1F431 "CAT FACE" will *not* be escaped! + BasicsUnicode, + /// Escape basics and all non-ASCII characters, including codepoints above U+FFFF. + /// This will escape emoji - if you want them to remain raw, use BasicsUnicode instead. + BasicsUnicodeExtended, + /// Escape reserved symbols. 
+ /// This includes everything in EscapePolicy::Basics, plus the comment characters ';' and '#' and the key/value-separating characters '=' and ':'. + Reserved, + /// Escape reserved symbols and non-ASCII characters in the BMP. + /// Codepoints above U+FFFF, e.g. '🐱' U+1F431 "CAT FACE" will *not* be escaped! + ReservedUnicode, + /// Escape reserved symbols and all non-ASCII characters, including codepoints above U+FFFF. + ReservedUnicodeExtended, + /// Escape everything that some INI implementations assume + Everything, +} + +impl EscapePolicy { + fn escape_basics(self) -> bool { + match self { + EscapePolicy::Nothing => false, + _ => true, + } + } + + fn escape_reserved(self) -> bool { + match self { + EscapePolicy::Reserved => true, + EscapePolicy::ReservedUnicode => true, + EscapePolicy::ReservedUnicodeExtended => true, + EscapePolicy::Everything => true, + _ => false, + } + } + + fn escape_unicode(self) -> bool { + match self { + EscapePolicy::BasicsUnicode => true, + EscapePolicy::BasicsUnicodeExtended => true, + EscapePolicy::ReservedUnicode => true, + EscapePolicy::ReservedUnicodeExtended => true, + EscapePolicy::Everything => true, + _ => false, + } + } + + fn escape_unicode_extended(self) -> bool { + match self { + EscapePolicy::BasicsUnicodeExtended => true, + EscapePolicy::ReservedUnicodeExtended => true, + EscapePolicy::Everything => true, + _ => false, + } + } + + /// Given a character this returns true if it should be escaped as + /// per this policy or false if not. + pub fn should_escape(self, c: char) -> bool { + match c { + // A single backslash, must be escaped + // ASCII control characters, U+0000 NUL..= U+001F UNIT SEPARATOR, or U+007F DELETE. The same as char::is_ascii_control() + '\\' | '\x00'..='\x1f' | '\x7f' => self.escape_basics(), + ';' | '#' | '=' | ':' => self.escape_reserved(), + '\u{0080}'..='\u{FFFF}' => self.escape_unicode(), + '\u{10000}'..='\u{10FFFF}' => self.escape_unicode_extended(), + _ => false, + } + } +} + +// Escape non-INI characters +// +// Common escape sequences: https://en.wikipedia.org/wiki/INI_file#Escape_characters +// +// * `\\` \ (a single backslash, escaping the escape character) +// * `\0` Null character +// * `\a` Bell/Alert/Audible +// * `\b` Backspace, Bell character for some applications +// * `\t` Tab character +// * `\r` Carriage return +// * `\n` Line feed +// * `\;` Semicolon +// * `\#` Number sign +// * `\=` Equals sign +// * `\:` Colon +// * `\x????` Unicode character with hexadecimal code point corresponding to ???? +fn escape_str(s: &str, policy: EscapePolicy) -> String { + let mut escaped: String = String::with_capacity(s.len()); + for c in s.chars() { + // if we know this is not something to escape as per policy, we just + // write it and continue. + if !policy.should_escape(c) { + escaped.push(c); + continue; + } + + match c { + '\\' => escaped.push_str("\\\\"), + '\0' => escaped.push_str("\\0"), + '\x01'..='\x06' | '\x0e'..='\x1f' | '\x7f'..='\u{00ff}' => { + escaped.push_str(&format!("\\x{:04x}", c as isize)[..]) + } + '\x07' => escaped.push_str("\\a"), + '\x08' => escaped.push_str("\\b"), + '\x0c' => escaped.push_str("\\f"), + '\x0b' => escaped.push_str("\\v"), + '\n' => escaped.push_str("\\n"), + '\t' => escaped.push_str("\\t"), + '\r' => escaped.push_str("\\r"), + '\u{0080}'..='\u{FFFF}' => escaped.push_str(&format!("\\x{:04x}", c as isize)[..]), + // Longer escapes. + '\u{10000}'..='\u{FFFFF}' => escaped.push_str(&format!("\\x{:05x}", c as isize)[..]), + '\u{100000}'..='\u{10FFFF}' => escaped.push_str(&format!("\\x{:06x}", c as isize)[..]), + _ => { + escaped.push('\\'); + escaped.push(c); + } + } + } + escaped +}
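The escaping tiers build on one another: `should_escape` is the public decision point, and the crate-internal `escape_str` above applies it character by character when writing values out. A small sketch of the tier behavior, using only the public `EscapePolicy::should_escape` from this file:

```rust
use ini::EscapePolicy;

fn main() {
    // Backslashes and control characters are "basics": every policy
    // except `Nothing` escapes them.
    assert!(EscapePolicy::Basics.should_escape('\\'));
    assert!(!EscapePolicy::Nothing.should_escape('\\'));

    // ';', '#', '=' and ':' are reserved symbols: only the Reserved*
    // tiers and `Everything` escape them.
    assert!(!EscapePolicy::Basics.should_escape(';'));
    assert!(EscapePolicy::Reserved.should_escape(';'));

    // BMP characters above U+007F need a *Unicode tier; astral-plane
    // characters additionally need a *UnicodeExtended tier.
    assert!(EscapePolicy::BasicsUnicode.should_escape('树'));
    assert!(!EscapePolicy::BasicsUnicode.should_escape('🐱'));
    assert!(EscapePolicy::BasicsUnicodeExtended.should_escape('🐱'));
}
```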
+ +/// Parsing configuration +pub struct ParseOption { + /// Allow quote (`"` or `'`) in value + /// For example + /// ```ini + /// [Section] + /// Key1="Quoted value" + /// Key2='Single Quote' with extra value + /// ``` + /// + /// In this example, Value of `Key1` is `Quoted value`, + /// and value of `Key2` is `Single Quote with extra value` + /// if `enabled_quote` is set to `true`. + pub enabled_quote: bool, + + /// Interpret `\` as an escape character + /// For example + /// ```ini + /// [Section] + /// Key1=C:\Windows + /// ``` + /// + /// If `enabled_escape` is true, then the value of `Key` will become `C:Windows` (`\W` equals to `W`). + pub enabled_escape: bool, +} + +impl Default for ParseOption { + fn default() -> ParseOption { + ParseOption { + enabled_quote: true, + enabled_escape: true, + } + } +} + +/// Newline style +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub enum LineSeparator { + /// System-dependent line separator + /// + /// On UNIX system, uses "\n" + /// On Windows system, uses "\r\n" + SystemDefault, + + /// Uses "\n" as new line separator + CR, + + /// Uses "\r\n" as new line separator + CRLF, +} + +#[cfg(not(windows))] +static DEFAULT_LINE_SEPARATOR: &str = "\n"; + +#[cfg(windows)] +static DEFAULT_LINE_SEPARATOR: &str = "\r\n"; + +static DEFAULT_KV_SEPARATOR: &str = "="; + +impl fmt::Display for LineSeparator { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.write_str(self.as_str()) + } +} + +impl LineSeparator { + /// String representation + pub fn as_str(self) -> &'static str { + match self { + LineSeparator::SystemDefault => DEFAULT_LINE_SEPARATOR, + LineSeparator::CR => "\n", + LineSeparator::CRLF => "\r\n", + } + } +} + +/// Writing configuration +#[derive(Debug, Clone)] +pub struct WriteOption { + /// Policies about how to escape characters + pub escape_policy: EscapePolicy, + + /// Newline style + pub line_separator: LineSeparator, + + /// Key value separator + pub kv_separator: &'static str, +} + +impl Default for WriteOption { + fn default() -> WriteOption { + WriteOption { + escape_policy: EscapePolicy::Basics, + line_separator: LineSeparator::SystemDefault, + kv_separator: DEFAULT_KV_SEPARATOR, + } + } +}
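Since `WriteOption` implements `Default`, callers typically override just one or two fields with struct-update syntax; the options are then consumed by the `*_opt` writer methods defined further down in the full lib.rs (beyond this excerpt). A brief sketch using only the types shown above:

```rust
use ini::{EscapePolicy, LineSeparator, WriteOption};

fn main() {
    // Defaults: Basics escaping, system newline, "=" separator.
    let default_opt = WriteOption::default();
    assert_eq!(default_opt.kv_separator, "=");

    // Struct-update syntax overrides selected fields only.
    let opt = WriteOption {
        line_separator: LineSeparator::CRLF,
        ..Default::default()
    };
    assert_eq!(opt.line_separator.as_str(), "\r\n");
    assert_eq!(opt.escape_policy, EscapePolicy::Basics);
}
```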
section_key { + ($s:expr) => { + $s.map(Into::into) + }; + } + } +} + +/// A setter which could be used to set key-value pair in a specified section +pub struct SectionSetter<'a> { + ini: &'a mut Ini, + section_name: Option, +} + +impl<'a> SectionSetter<'a> { + fn new(ini: &'a mut Ini, section_name: Option) -> SectionSetter<'a> { + SectionSetter { ini, section_name } + } + + /// Set (replace) key-value pair in this section (all with the same name) + pub fn set(&'a mut self, key: K, value: V) -> &'a mut SectionSetter<'a> + where + K: Into, + V: Into, + { + self.ini + .entry(self.section_name.clone()) + .or_insert_with(Default::default) + .insert(key, value); + + self + } + + /// Delete the first entry in this section with `key` + pub fn delete>(&'a mut self, key: &K) -> &'a mut SectionSetter<'a> { + for prop in self.ini.section_all_mut(self.section_name.as_ref()) { + prop.remove(key); + } + + self + } + + /// Get the entry in this section with `key` + pub fn get>(&'a mut self, key: K) -> Option<&'a str> { + self.ini + .section(self.section_name.as_ref()) + .and_then(|prop| prop.get(key)) + .map(AsRef::as_ref) + } +} + +/// Properties type (key-value pairs) +#[derive(Clone, Default, Debug, PartialEq)] +pub struct Properties { + data: ListOrderedMultimap, +} + +impl Properties { + /// Create an instance + pub fn new() -> Properties { + Default::default() + } + + /// Get the number of the properties + pub fn len(&self) -> usize { + self.data.keys_len() + } + + /// Check if properties has 0 elements + pub fn is_empty(&self) -> bool { + self.data.is_empty() + } + + /// Get an iterator of the properties + pub fn iter(&self) -> impl DoubleEndedIterator { + self.data.iter().map(|(k, v)| (k.as_ref(), v.as_str())) + } + + /// Return true if property exist + pub fn contains_key>(&self, s: S) -> bool { + self.data.contains_key(property_get_key!(s.as_ref())) + } + + /// Insert (key, value) pair by replace + pub fn insert(&mut self, k: K, v: V) + where + K: Into, + V: Into, + { + self.data.insert(property_insert_key!(k.into()), v.into()); + } + + /// Append key with (key, value) pair + pub fn append(&mut self, k: K, v: V) + where + K: Into, + V: Into, + { + self.data.append(property_insert_key!(k.into()), v.into()); + } + + /// Get the first value associate with the key + pub fn get>(&self, s: S) -> Option<&str> { + self.data.get(property_get_key!(s.as_ref())).map(|v| v.as_str()) + } + + /// Get all values associate with the key + pub fn get_all>(&self, s: S) -> impl DoubleEndedIterator { + self.data.get_all(property_get_key!(s.as_ref())).map(|v| v.as_str()) + } + + /// Remove the property with the first value of the key + pub fn remove>(&mut self, s: S) -> Option { + self.data.remove(property_get_key!(s.as_ref())) + } + + /// Remove the property with all values with the same key + pub fn remove_all<'a, S: AsRef>(&'a mut self, s: S) -> impl DoubleEndedIterator + 'a { + self.data.remove_all(property_get_key!(s.as_ref())) + } + + fn get_mut>(&mut self, s: S) -> Option<&mut str> { + self.data.get_mut(property_get_key!(s.as_ref())).map(|v| v.as_mut_str()) + } +} + +impl> Index for Properties { + type Output = str; + + fn index(&self, index: S) -> &str { + let s = index.as_ref(); + match self.get(s) { + Some(p) => p, + None => panic!("Key `{}` does not exist", s), + } + } +} + +/// A view into a vacant entry in a `Ini` +pub struct SectionVacantEntry<'a> { + inner: VacantEntry<'a, SectionKey, Properties>, +} + +impl<'a> SectionVacantEntry<'a> { + /// Insert one new section + pub fn insert(self, value: 
+
+/// A view into a vacant entry in a `Ini`
+pub struct SectionVacantEntry<'a> {
+    inner: VacantEntry<'a, SectionKey, Properties>,
+}
+
+impl<'a> SectionVacantEntry<'a> {
+    /// Insert one new section
+    pub fn insert(self, value: Properties) -> &'a mut Properties {
+        self.inner.insert(value)
+    }
+}
+
+/// A view into an occupied entry in a `Ini`
+pub struct SectionOccupiedEntry<'a> {
+    inner: OccupiedEntry<'a, SectionKey, Properties>,
+}
+
+impl<'a> SectionOccupiedEntry<'a> {
+    /// Into the first internal mutable properties
+    pub fn into_mut(self) -> &'a mut Properties {
+        self.inner.into_mut()
+    }
+
+    /// Append a new section
+    pub fn append(&mut self, prop: Properties) {
+        self.inner.append(prop);
+    }
+
+    fn last_mut(&'a mut self) -> &'a mut Properties {
+        self.inner
+            .iter_mut()
+            .next_back()
+            .expect("occupied section shouldn't have 0 property")
+    }
+}
+
+/// A view into an `Ini`, which may either be vacant or occupied.
+pub enum SectionEntry<'a> {
+    Vacant(SectionVacantEntry<'a>),
+    Occupied(SectionOccupiedEntry<'a>),
+}
+
+impl<'a> SectionEntry<'a> {
+    /// Ensures a value is in the entry by inserting the default if empty, and returns a mutable reference to the value in the entry.
+    pub fn or_insert(self, properties: Properties) -> &'a mut Properties {
+        match self {
+            SectionEntry::Occupied(e) => e.into_mut(),
+            SectionEntry::Vacant(e) => e.insert(properties),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the result of the default function if empty, and returns a mutable reference to the value in the entry.
+    pub fn or_insert_with<F: FnOnce() -> Properties>(self, default: F) -> &'a mut Properties {
+        match self {
+            SectionEntry::Occupied(e) => e.into_mut(),
+            SectionEntry::Vacant(e) => e.insert(default()),
+        }
+    }
+}
+
+impl<'a> From<Entry<'a, SectionKey, Properties>> for SectionEntry<'a> {
+    fn from(e: Entry<'a, SectionKey, Properties>) -> SectionEntry<'a> {
+        match e {
+            Entry::Occupied(inner) => SectionEntry::Occupied(SectionOccupiedEntry { inner }),
+            Entry::Vacant(inner) => SectionEntry::Vacant(SectionVacantEntry { inner }),
+        }
+    }
+}
+
+/// Ini struct
+#[derive(Debug, Clone)]
+pub struct Ini {
+    sections: ListOrderedMultimap<SectionKey, Properties>,
+}
+
+impl Ini {
+    /// Create an instance
+    pub fn new() -> Ini {
+        Default::default()
+    }
+
+    /// Set with a specified section, `None` is for the general section
+    pub fn with_section<S>(&mut self, section: Option<S>) -> SectionSetter
+    where
+        S: Into<String>,
+    {
+        SectionSetter::new(self, section.map(Into::into))
+    }
+
+    /// Set with general section, a simple wrapper of `with_section(None::<String>)`
+    pub fn with_general_section(&mut self) -> SectionSetter {
+        self.with_section(None::<String>)
+    }
+
+    /// Get the immutable general section
+    pub fn general_section(&self) -> &Properties {
+        self.section(None::<String>)
+            .expect("There is no general section in this Ini")
+    }
+
+    /// Get the mutable general section
+    pub fn general_section_mut(&mut self) -> &mut Properties {
+        self.section_mut(None::<String>)
+            .expect("There is no general section in this Ini")
+    }
+
+    /// Get an immutable section
+    pub fn section<S>(&self, name: Option<S>) -> Option<&Properties>
+    where
+        S: Into<String>,
+    {
+        self.sections.get(&section_key!(name))
+    }
+
+    /// Get a mutable section
+    pub fn section_mut<S>(&mut self, name: Option<S>) -> Option<&mut Properties>
+    where
+        S: Into<String>,
+    {
+        self.sections.get_mut(&section_key!(name))
+    }
+
+    /// Get all immutable sections with the same key
+    pub fn section_all<S>(&self, name: Option<S>) -> impl DoubleEndedIterator<Item = &Properties>
+    where
+        S: Into<String>,
+    {
+        self.sections.get_all(&section_key!(name))
+    }
+
+    /// Get all mutable sections with the same key
+    pub fn section_all_mut<S>(&mut self, name: Option<S>) -> impl DoubleEndedIterator<Item = &mut Properties>
+    where
+        S: Into<String>,
+    {
+        self.sections.get_all_mut(&section_key!(name))
+    }
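+
+    // Duplicate section names are preserved: parsing two `[Peer]` blocks yields
+    // two `Properties` values under the same key. `section()` returns the first
+    // one and `section_all()` yields each in order; a sketch of the behaviour
+    // exercised by the `duplicate_sections` test below:
+    //
+    //     let i = Ini::load_from_str("[Peer]\nfoo = a\n[Peer]\nfoo = c\n").unwrap();
+    //     assert_eq!(i.section(Some("Peer")).unwrap().get("foo"), Some("a"));
+    //     assert_eq!(i.section_all(Some("Peer")).count(), 2);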
+
+    /// Get the entry
+    #[cfg(not(feature = "case-insensitive"))]
+    pub fn entry(&mut self, name: Option<String>) -> SectionEntry<'_> {
+        SectionEntry::from(self.sections.entry(name.map(|s| s)))
+    }
+
+    /// Get the entry
+    #[cfg(feature = "case-insensitive")]
+    pub fn entry(&mut self, name: Option<String>) -> SectionEntry<'_> {
+        SectionEntry::from(self.sections.entry(name.map(UniCase::from)))
+    }
+
+    /// Clear all entries
+    pub fn clear(&mut self) {
+        self.sections.clear()
+    }
+
+    /// Iterate with sections
+    pub fn sections(&self) -> impl DoubleEndedIterator<Item = Option<&str>> {
+        self.sections.keys().map(|s| s.as_ref().map(AsRef::as_ref))
+    }
+
+    /// Set key-value to a section
+    pub fn set_to<S>(&mut self, section: Option<S>, key: String, value: String)
+    where
+        S: Into<String>,
+    {
+        self.with_section(section).set(key, value);
+    }
+
+    /// Get the first value from the sections with key
+    ///
+    /// Example:
+    ///
+    /// ```
+    /// use ini::Ini;
+    /// let input = "[sec]\nabc = def\n";
+    /// let ini = Ini::load_from_str(input).unwrap();
+    /// assert_eq!(ini.get_from(Some("sec"), "abc"), Some("def"));
+    /// ```
+    pub fn get_from<'a, S>(&'a self, section: Option<S>, key: &str) -> Option<&'a str>
+    where
+        S: Into<String>,
+    {
+        self.sections.get(&section_key!(section)).and_then(|prop| prop.get(key))
+    }
+
+    /// Get the first value from the sections with key, return the default value if it does not exist
+    ///
+    /// Example:
+    ///
+    /// ```
+    /// use ini::Ini;
+    /// let input = "[sec]\n";
+    /// let ini = Ini::load_from_str(input).unwrap();
+    /// assert_eq!(ini.get_from_or(Some("sec"), "key", "default"), "default");
+    /// ```
+    pub fn get_from_or<'a, S>(&'a self, section: Option<S>, key: &str, default: &'a str) -> &'a str
+    where
+        S: Into<String>,
+    {
+        self.get_from(section, key).unwrap_or(default)
+    }
+
+    /// Get the first mutable value from the sections with key
+    pub fn get_from_mut<'a, S>(&'a mut self, section: Option<S>, key: &str) -> Option<&'a mut str>
+    where
+        S: Into<String>,
+    {
+        self.sections
+            .get_mut(&section_key!(section))
+            .and_then(|prop| prop.get_mut(key))
+    }
+
+    /// Delete the first section with key, return the properties if it exists
+    pub fn delete<S>(&mut self, section: Option<S>) -> Option<Properties>
+    where
+        S: Into<String>,
+    {
+        let key = section_key!(section);
+        self.sections.remove(&key)
+    }
+
+    /// Delete the key from the section, return the value if key exists or None
+    pub fn delete_from<S>(&mut self, section: Option<S>, key: &str) -> Option<String>
+    where
+        S: Into<String>,
+    {
+        self.section_mut(section).and_then(|prop| prop.remove(key))
+    }
+
+    /// Total sections count
+    pub fn len(&self) -> usize {
+        self.sections.keys_len()
+    }
+
+    /// Check if object contains no section
+    pub fn is_empty(&self) -> bool {
+        self.sections.is_empty()
+    }
+}
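+
+// A short end-to-end sketch (not upstream code; it only uses the accessors
+// defined above) of setting, reading and deleting through the `Ini` API.
+#[cfg(test)]
+#[allow(dead_code)]
+fn _ini_accessor_sketch() {
+    let mut ini = Ini::new();
+    ini.set_to(Some("net"), "port".to_string(), "8080".to_string());
+    assert_eq!(ini.get_from(Some("net"), "port"), Some("8080"));
+    assert_eq!(ini.get_from_or(Some("net"), "host", "localhost"), "localhost");
+    assert!(ini.delete(Some("net")).is_some());
+    assert_eq!(ini.get_from(Some("net"), "port"), None);
+}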
+
+impl Default for Ini {
+    /// Creates an ini instance with an empty general section. This allows [Ini::general_section]
+    /// and [Ini::with_general_section] to be called without panicking.
+    fn default() -> Self {
+        let mut result = Ini {
+            sections: Default::default(),
+        };
+
+        result.sections.insert(None, Default::default());
+
+        result
+    }
+}
+
+impl<S: Into<String>> Index<Option<S>> for Ini {
+    type Output = Properties;
+
+    fn index(&self, index: Option<S>) -> &Properties {
+        match self.section(index) {
+            Some(p) => p,
+            None => panic!("Section does not exist"),
+        }
+    }
+}
+
+impl<S: Into<String>> IndexMut<Option<S>> for Ini {
+    fn index_mut(&mut self, index: Option<S>) -> &mut Properties {
+        match self.section_mut(index) {
+            Some(p) => p,
+            None => panic!("Section does not exist"),
+        }
+    }
+}
+
+impl<'q> Index<&'q str> for Ini {
+    type Output = Properties;
+
+    fn index<'a>(&'a self, index: &'q str) -> &'a Properties {
+        match self.section(Some(index)) {
+            Some(p) => p,
+            None => panic!("Section `{}` does not exist", index),
+        }
+    }
+}
+
+impl<'q> IndexMut<&'q str> for Ini {
+    fn index_mut<'a>(&'a mut self, index: &'q str) -> &'a mut Properties {
+        match self.section_mut(Some(index)) {
+            Some(p) => p,
+            None => panic!("Section `{}` does not exist", index),
+        }
+    }
+}
+
+impl Ini {
+    /// Write to a file
+    pub fn write_to_file<P: AsRef<Path>>(&self, filename: P) -> io::Result<()> {
+        self.write_to_file_policy(filename, EscapePolicy::Basics)
+    }
+
+    /// Write to a file with a given escape policy
+    pub fn write_to_file_policy<P: AsRef<Path>>(&self, filename: P, policy: EscapePolicy) -> io::Result<()> {
+        let mut file = OpenOptions::new()
+            .write(true)
+            .truncate(true)
+            .create(true)
+            .open(filename.as_ref())?;
+        self.write_to_policy(&mut file, policy)
+    }
+
+    /// Write to a file with options
+    pub fn write_to_file_opt<P: AsRef<Path>>(&self, filename: P, opt: WriteOption) -> io::Result<()> {
+        let mut file = OpenOptions::new()
+            .write(true)
+            .truncate(true)
+            .create(true)
+            .open(filename.as_ref())?;
+        self.write_to_opt(&mut file, opt)
+    }
+
+    /// Write to a writer
+    pub fn write_to<W: Write>(&self, writer: &mut W) -> io::Result<()> {
+        self.write_to_opt(writer, Default::default())
+    }
+
+    /// Write to a writer with a given escape policy
+    pub fn write_to_policy<W: Write>(&self, writer: &mut W, policy: EscapePolicy) -> io::Result<()> {
+        self.write_to_opt(
+            writer,
+            WriteOption {
+                escape_policy: policy,
+                ..Default::default()
+            },
+        )
+    }
+
+    /// Write to a writer with options
+    pub fn write_to_opt<W: Write>(&self, writer: &mut W, opt: WriteOption) -> io::Result<()> {
+        let mut firstline = true;
+
+        if let Some(props) = self.sections.get(&None) {
+            for (k, v) in props.iter() {
+                let k_str = escape_str(&k[..], opt.escape_policy);
+                let v_str = escape_str(&v[..], opt.escape_policy);
+                write!(writer, "{}={}{}", k_str, v_str, opt.line_separator)?;
+
+                firstline = false;
+            }
+        }
+
+        for (section, props) in &self.sections {
+            if let Some(ref section) = *section {
+                if firstline {
+                    firstline = false;
+                } else {
+                    // Write an empty line between sections
+                    writer.write_all(opt.line_separator.as_str().as_bytes())?;
+                }
+
+                write!(
+                    writer,
+                    "[{}]{}",
+                    escape_str(&section[..], opt.escape_policy),
+                    opt.line_separator
+                )?;
+
+                for (k, v) in props.iter() {
+                    let k_str = escape_str(&k[..], opt.escape_policy);
+                    let v_str = escape_str(&v[..], opt.escape_policy);
+                    write!(writer, "{}{}{}{}", k_str, opt.kv_separator, v_str, opt.line_separator)?;
+                }
+            }
+        }
+        Ok(())
+    }
+}
+
+impl Ini {
+    /// Load from a string
+    pub fn load_from_str(buf: &str) -> Result<Ini, ParseError> {
+        Ini::load_from_str_opt(buf, ParseOption::default())
+    }
+
+    /// Load from a string, but do not interpret '\' as an escape character
+    pub fn load_from_str_noescape(buf: &str) -> Result<Ini, ParseError> {
+        Ini::load_from_str_opt(
+            buf,
+            ParseOption {
+                enabled_escape: false,
+                ..ParseOption::default()
+            },
+        )
+    }
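+
+    // A sketch of the difference (derived from the `ParseOption` docs above and
+    // the `load_from_str_noescape` test below): with escaping enabled, `\W` is
+    // unescaped to `W`, so only the noescape variant keeps a literal backslash.
+    //
+    //     let raw = "path=C:\\Windows";
+    //     assert_eq!(Ini::load_from_str(raw).unwrap().general_section().get("path"), Some("C:Windows"));
+    //     assert_eq!(Ini::load_from_str_noescape(raw).unwrap().general_section().get("path"), Some("C:\\Windows"));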
+
+    /// Load from a string with options
+    pub fn load_from_str_opt(buf: &str, opt: ParseOption) -> Result<Ini, ParseError> {
+        let mut parser = Parser::new(buf.chars(), opt);
+        parser.parse()
+    }
+
+    /// Load from a reader
+    pub fn read_from<R: Read>(reader: &mut R) -> Result<Ini, Error> {
+        Ini::read_from_opt(reader, ParseOption::default())
+    }
+
+    /// Load from a reader, but do not interpret '\' as an escape character
+    pub fn read_from_noescape<R: Read>(reader: &mut R) -> Result<Ini, Error> {
+        Ini::read_from_opt(
+            reader,
+            ParseOption {
+                enabled_escape: false,
+                ..ParseOption::default()
+            },
+        )
+    }
+
+    /// Load from a reader with options
+    pub fn read_from_opt<R: Read>(reader: &mut R, opt: ParseOption) -> Result<Ini, Error> {
+        let mut s = String::new();
+        reader.read_to_string(&mut s).map_err(Error::Io)?;
+        let mut parser = Parser::new(s.chars(), opt);
+        match parser.parse() {
+            Err(e) => Err(Error::Parse(e)),
+            Ok(success) => Ok(success),
+        }
+    }
+
+    /// Load from a file
+    pub fn load_from_file<P: AsRef<Path>>(filename: P) -> Result<Ini, Error> {
+        Ini::load_from_file_opt(filename, ParseOption::default())
+    }
+
+    /// Load from a file, but do not interpret '\' as an escape character
+    pub fn load_from_file_noescape<P: AsRef<Path>>(filename: P) -> Result<Ini, Error> {
+        Ini::load_from_file_opt(
+            filename,
+            ParseOption {
+                enabled_escape: false,
+                ..ParseOption::default()
+            },
+        )
+    }
+
+    /// Load from a file with options
+    pub fn load_from_file_opt<P: AsRef<Path>>(filename: P, opt: ParseOption) -> Result<Ini, Error> {
+        let mut reader = match File::open(filename.as_ref()) {
+            Err(e) => {
+                return Err(Error::Io(e));
+            }
+            Ok(r) => r,
+        };
+
+        let mut with_bom = false;
+
+        // Check if file starts with a BOM marker
+        // UTF-8: EF BB BF
+        let mut bom = [0u8; 3];
+        if let Ok(..) = reader.read_exact(&mut bom) {
+            if &bom == b"\xEF\xBB\xBF" {
+                with_bom = true;
+            }
+        }
+
+        if !with_bom {
+            // Reset file pointer
+            reader.seek(SeekFrom::Start(0))?;
+        }
+
+        Ini::read_from_opt(&mut reader, opt)
+    }
+}
+
+/// Iterator for traversing sections
+pub struct SectionIter<'a> {
+    inner: Iter<'a, SectionKey, Properties>,
+}
+
+impl<'a> Iterator for SectionIter<'a> {
+    type Item = (Option<&'a str>, &'a Properties);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next().map(|(k, v)| (k.as_ref().map(|s| s.as_str()), v))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl DoubleEndedIterator for SectionIter<'_> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        self.inner.next_back().map(|(k, v)| (k.as_ref().map(|s| s.as_str()), v))
+    }
+}
+
+/// Iterator for traversing sections mutably
+pub struct SectionIterMut<'a> {
+    inner: IterMut<'a, SectionKey, Properties>,
+}
+
+impl<'a> Iterator for SectionIterMut<'a> {
+    type Item = (Option<&'a str>, &'a mut Properties);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next().map(|(k, v)| (k.as_ref().map(|s| s.as_str()), v))
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl DoubleEndedIterator for SectionIterMut<'_> {
+    fn next_back(&mut self) -> Option<Self::Item> {
+        self.inner.next_back().map(|(k, v)| (k.as_ref().map(|s| s.as_str()), v))
+    }
+}
+
+impl<'a> Ini {
+    /// Immutable iteration through sections
+    pub fn iter(&'a self) -> SectionIter<'a> {
+        SectionIter {
+            inner: self.sections.iter(),
+        }
+    }
+
+    /// Mutable iteration through sections
+    #[deprecated(note = "Use `iter_mut` instead!")]
+    pub fn mut_iter(&'a mut self) -> SectionIterMut<'a> {
+        self.iter_mut()
+    }
+
+    /// Mutable iteration through sections
+    pub fn iter_mut(&'a mut self) -> SectionIterMut<'a> {
+        SectionIterMut {
+            inner: self.sections.iter_mut(),
+        }
+    }
+}
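+
+// An iteration sketch (not upstream code): section order is insertion order and
+// the general section always comes first with key `None`, as the
+// `preserve_order_section` test below also checks.
+#[cfg(test)]
+#[allow(dead_code)]
+fn _iter_order_sketch() {
+    let ini = Ini::load_from_str("g = 1\n[a]\nx = 1\n[b]\ny = 2\n").unwrap();
+    let names: Vec<Option<&str>> = ini.iter().map(|(name, _)| name).collect();
+    assert_eq!(names, vec![None, Some("a"), Some("b")]);
+}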
+
+impl<'a> IntoIterator for &'a Ini {
+    type Item = (Option<&'a str>, &'a Properties);
+    type IntoIter = SectionIter<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a> IntoIterator for &'a mut Ini {
+    type Item = (Option<&'a str>, &'a mut Properties);
+    type IntoIter = SectionIterMut<'a>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+// Ini parser
+struct Parser<'a> {
+    ch: Option<char>,
+    rdr: Chars<'a>,
+    line: usize,
+    col: usize,
+    opt: ParseOption,
+}
+
+#[derive(Debug)]
+/// Parse error
+pub struct ParseError {
+    pub line: usize,
+    pub col: usize,
+    pub msg: String,
+}
+
+impl Display for ParseError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}:{} {}", self.line, self.col, self.msg)
+    }
+}
+
+impl error::Error for ParseError {}
+
+/// Error while parsing an INI document
+#[derive(Debug)]
+pub enum Error {
+    Io(io::Error),
+    Parse(ParseError),
+}
+
+impl Display for Error {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            Error::Io(ref err) => err.fmt(f),
+            Error::Parse(ref err) => err.fmt(f),
+        }
+    }
+}
+
+impl error::Error for Error {
+    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
+        match *self {
+            Error::Io(ref err) => err.source(),
+            Error::Parse(ref err) => err.source(),
+        }
+    }
+}
+
+impl From<io::Error> for Error {
+    fn from(err: io::Error) -> Self {
+        Error::Io(err)
+    }
+}
+
+impl<'a> Parser<'a> {
+    // Create a parser
+    pub fn new(rdr: Chars<'a>, opt: ParseOption) -> Parser<'a> {
+        let mut p = Parser {
+            ch: None,
+            line: 0,
+            col: 0,
+            rdr,
+            opt,
+        };
+        p.bump();
+        p
+    }
+
+    fn eof(&self) -> bool {
+        self.ch.is_none()
+    }
+
+    fn bump(&mut self) {
+        self.ch = self.rdr.next();
+        match self.ch {
+            Some('\n') => {
+                self.line += 1;
+                self.col = 0;
+            }
+            Some(..) => {
+                self.col += 1;
+            }
+            None => {}
+        }
+    }
+
+    fn error<U, M: Into<String>>(&self, msg: M) -> Result<U, ParseError> {
+        Err(ParseError {
+            line: self.line + 1,
+            col: self.col + 1,
+            msg: msg.into(),
+        })
+    }
+
+    /// Consume all whitespace, including line breaks and tabs
+    fn parse_whitespace(&mut self) {
+        while let Some(c) = self.ch {
+            if !c.is_whitespace() && c != '\n' && c != '\t' && c != '\r' {
+                break;
+            }
+            self.bump();
+        }
+    }
+
+    /// Consume whitespace except line breaks (spaces and tabs)
+    fn parse_whitespace_except_line_break(&mut self) {
+        while let Some(c) = self.ch {
+            if (c == '\n' || c == '\r' || !c.is_whitespace()) && c != '\t' {
+                break;
+            }
+            self.bump();
+        }
+    }
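+
+    // Note: `line` and `col` are tracked 0-based by `bump()` and converted to
+    // 1-based in `error()`; the `parse_error_numbers` test below depends on
+    // this (input "\n\\x" reports line 2, col 3).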
+
+    /// Parse the whole INI input
+    pub fn parse(&mut self) -> Result<Ini, ParseError> {
+        let mut result = Ini::new();
+        let mut curkey: String = "".into();
+        let mut cursec: Option<String> = None;
+
+        self.parse_whitespace();
+        while let Some(cur_ch) = self.ch {
+            match cur_ch {
+                ';' | '#' => {
+                    if cfg!(not(feature = "inline-comment")) {
+                        // Inline comments are not supported, so a comment must start on a new line
+                        //
+                        // https://en.wikipedia.org/wiki/INI_file#Comments
+                        if self.col > 1 {
+                            return self.error("doesn't support inline comment");
+                        }
+                    }
+
+                    self.parse_comment();
+                }
+                '[' => match self.parse_section() {
+                    Ok(sec) => {
+                        let msec = sec[..].trim();
+                        cursec = Some((*msec).to_string());
+                        match result.entry(cursec.clone()) {
+                            SectionEntry::Vacant(v) => {
+                                v.insert(Default::default());
+                            }
+                            SectionEntry::Occupied(mut o) => {
+                                o.append(Default::default());
+                            }
+                        }
+                    }
+                    Err(e) => return Err(e),
+                },
+                '=' | ':' => {
+                    if (&curkey[..]).is_empty() {
+                        return self.error("missing key");
+                    }
+                    match self.parse_val() {
+                        Ok(val) => {
+                            let mval = val[..].trim().to_owned();
+                            match result.entry(cursec.clone()) {
+                                SectionEntry::Vacant(v) => {
+                                    // cursec must be None (the General Section)
+                                    let mut prop = Properties::new();
+                                    prop.insert(curkey, mval);
+                                    v.insert(prop);
+                                }
+                                SectionEntry::Occupied(mut o) => {
+                                    // Insert into the last (current) section
+                                    o.last_mut().append(curkey, mval);
+                                }
+                            }
+                            curkey = "".into();
+                        }
+                        Err(e) => return Err(e),
+                    }
+                }
+                _ => match self.parse_key() {
+                    Ok(key) => {
+                        let mkey: String = key[..].trim().to_owned();
+                        curkey = mkey;
+                    }
+                    Err(e) => return Err(e),
+                },
+            }
+
+            self.parse_whitespace();
+        }
+
+        Ok(result)
+    }
+
+    fn parse_comment(&mut self) {
+        while let Some(c) = self.ch {
+            self.bump();
+            if c == '\n' {
+                break;
+            }
+        }
+    }
+
+    fn parse_str_until(&mut self, endpoint: &[Option<char>], check_inline_comment: bool) -> Result<String, ParseError> {
+        let mut result: String = String::new();
+
+        while !endpoint.contains(&self.ch) {
+            match self.ch {
+                None => {
+                    return self.error(format!("expecting \"{:?}\" but found EOF.", endpoint));
+                }
+                #[cfg(feature = "inline-comment")]
+                Some(space) if check_inline_comment && (space == ' ' || space == '\t') => {
+                    self.bump();
+
+                    match self.ch {
+                        Some('#') | Some(';') => {
+                            // [space]#, [space]; starts an inline comment
+                            break;
+                        }
+                        Some(_) => {
+                            result.push(space);
+                            continue;
+                        }
+                        None => {
+                            result.push(space);
+                        }
+                    }
+                }
+                Some('\\') if self.opt.enabled_escape => {
+                    self.bump();
+                    if self.eof() {
+                        return self.error(format!("expecting \"{:?}\" but found EOF.", endpoint));
+                    }
+                    match self.ch.unwrap() {
+                        '0' => result.push('\0'),
+                        'a' => result.push('\x07'),
+                        'b' => result.push('\x08'),
+                        't' => result.push('\t'),
+                        'r' => result.push('\r'),
+                        'n' => result.push('\n'),
+                        '\n' => (),
+                        'x' => {
+                            // 4-character hexadecimal Unicode escape (\xHHHH)
+                            let mut code: String = String::with_capacity(4);
+                            for _ in 0..4 {
+                                self.bump();
+                                if self.eof() {
+                                    return self.error(format!("expecting \"{:?}\" but found EOF.", endpoint));
+                                } else if let Some('\\') = self.ch {
+                                    self.bump();
+                                    if self.ch != Some('\n') {
+                                        return self.error(format!(
+                                            "expecting \"\\\\n\" but \
+                                             found \"{:?}\".",
+                                            self.ch
+                                        ));
+                                    }
+                                }
+                                code.push(self.ch.unwrap());
+                            }
+                            let r = u32::from_str_radix(&code[..], 16);
+                            match r {
+                                Ok(c) => match char::from_u32(c) {
+                                    Some(c) => result.push(c),
+                                    None => {
+                                        return self.error("unknown character in \\xHH form");
+                                    }
+                                },
+                                Err(_) => return self.error("unknown character in \\xHH form"),
+                            }
+                        }
+                        c => result.push(c),
+                    }
+                }
+                Some(c) => {
+                    result.push(c);
+                }
+            }
+            self.bump();
+        }
+
+        let _ = check_inline_comment;
+        Ok(result)
+    }
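+
+    // The `\x` escape above consumes exactly four hex digits, matching the
+    // writer's `\x{:04x}` output for BMP characters, so e.g. "\\x00e9" parses
+    // back to 'é' (compare the escape_str tests at the end of this file).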
+
+    fn parse_section(&mut self) -> Result<String, ParseError> {
+        cfg_if! {
+            if #[cfg(feature = "brackets-in-section-names")] {
+                // Skip [
+                self.bump();
+
+                let mut s = match self.parse_str_until(&[Some('\r'), Some('\n')], cfg!(feature = "inline-comment")) {
+                    Ok(r) => r,
+                    Err(err) => return Err(err)
+                };
+
+                // Deal with inline comment
+                #[cfg(feature = "inline-comment")]
+                if matches!(self.ch, Some('#') | Some(';')) {
+                    self.parse_comment();
+                }
+
+                let tr = s.trim_end_matches(|c| c == ' ' || c == '\t');
+                if !tr.ends_with(']') {
+                    return self.error("section must be ended with ']'");
+                }
+
+                s.truncate(tr.len() - 1);
+                Ok(s)
+            } else {
+                // Skip [
+                self.bump();
+                let sec = self.parse_str_until(&[Some(']')], false)?;
+                if let Some(']') = self.ch {
+                    self.bump();
+                }
+
+                // Deal with inline comment
+                #[cfg(feature = "inline-comment")]
+                if matches!(self.ch, Some('#') | Some(';')) {
+                    self.parse_comment();
+                }
+
+                Ok(sec)
+            }
+        }
+    }
+
+    fn parse_key(&mut self) -> Result<String, ParseError> {
+        self.parse_str_until(&[Some('='), Some(':')], false)
+    }
+
+    fn parse_val(&mut self) -> Result<String, ParseError> {
+        self.bump();
+        // Issue #35: Allow empty value
+        self.parse_whitespace_except_line_break();
+
+        match self.ch {
+            None => Ok(String::new()),
+            Some('"') if self.opt.enabled_quote => {
+                self.bump();
+                self.parse_str_until(&[Some('"')], false).and_then(|s| {
+                    self.bump(); // Eats the last "
+                    // Parse until EOL
+                    self.parse_str_until_eol(cfg!(feature = "inline-comment"))
+                        .map(|x| s + &x)
+                })
+            }
+            Some('\'') if self.opt.enabled_quote => {
+                self.bump();
+                self.parse_str_until(&[Some('\'')], false).and_then(|s| {
+                    self.bump(); // Eats the last '
+                    // Parse until EOL
+                    self.parse_str_until_eol(cfg!(feature = "inline-comment"))
+                        .map(|x| s + &x)
+                })
+            }
+            _ => self.parse_str_until_eol(cfg!(feature = "inline-comment")),
+        }
+    }
+
+    #[inline]
+    fn parse_str_until_eol(&mut self, check_inline_comment: bool) -> Result<String, ParseError> {
+        let r = self.parse_str_until(&[Some('\n'), Some('\r'), None], check_inline_comment)?;
+
+        #[cfg(feature = "inline-comment")]
+        if check_inline_comment && matches!(self.ch, Some('#') | Some(';')) {
+            self.parse_comment();
+        }
+
+        Ok(r)
+    }
+}
+
+// ------------------------------------------------------------------------------
+
+#[cfg(test)]
+mod test {
+    use std::env::temp_dir;
+
+    use super::*;
+
+    #[test]
+    fn property_replace() {
+        let mut props = Properties::new();
+        props.insert("k1", "v1");
+
+        assert_eq!(Some("v1"), props.get("k1"));
+        let res = props.get_all("k1").collect::<Vec<&str>>();
+        assert_eq!(res, vec!["v1"]);
+
+        props.insert("k1", "v2");
+        assert_eq!(Some("v2"), props.get("k1"));
+
+        let res = props.get_all("k1").collect::<Vec<&str>>();
+        assert_eq!(res, vec!["v2"]);
+    }
+
+    #[test]
+    fn property_get_vec() {
+        let mut props = Properties::new();
+        props.append("k1", "v1");
+
+        assert_eq!(Some("v1"), props.get("k1"));
+
+        props.append("k1", "v2");
+
+        assert_eq!(Some("v1"), props.get("k1"));
+
+        let res = props.get_all("k1").collect::<Vec<&str>>();
+        assert_eq!(res, vec!["v1", "v2"]);
+
+        let res = props.get_all("k2").collect::<Vec<&str>>();
+        assert!(res.is_empty());
+    }
+
+    #[test]
+    fn property_remove() {
+        let mut props = Properties::new();
+        props.append("k1", "v1");
+        props.append("k1", "v2");
+
+        let res = props.remove_all("k1").collect::<Vec<String>>();
+        assert_eq!(res, vec!["v1", "v2"]);
+        assert!(!props.contains_key("k1"));
+    }
+
+    #[test]
+    fn load_from_str_with_empty_general_section() {
+        let input = "[sec1]\nkey1=val1\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let mut output = opt.unwrap();
+        assert_eq!(output.len(), 2);
+
+        assert!(output.general_section().is_empty());
+        assert!(output.general_section_mut().is_empty());
+
+        let props1 = output.section(None::<String>).unwrap();
+        assert!(props1.is_empty());
+        let props2 = output.section(Some("sec1")).unwrap();
+        assert_eq!(props2.len(), 1);
+        assert_eq!(props2.get("key1"), Some("val1"));
+    }
+
+    #[test]
+    fn load_from_str_with_empty_input() {
+        let input = "";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let mut output = opt.unwrap();
+        assert!(output.general_section().is_empty());
+        assert!(output.general_section_mut().is_empty());
+        assert_eq!(output.len(), 1);
+    }
+
+    #[test]
+    fn load_from_str_with_empty_lines() {
+        let input = "\n\n\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let mut output = opt.unwrap();
+        assert!(output.general_section().is_empty());
+        assert!(output.general_section_mut().is_empty());
+        assert_eq!(output.len(), 1);
+    }
+
+    #[test]
+    #[cfg(not(feature = "brackets-in-section-names"))]
+    fn load_from_str_with_valid_input() {
+        let input = "[sec1]\nkey1=val1\nkey2=377\n[sec2]foo=bar\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let output = opt.unwrap();
+        // there is always a general section
+        assert_eq!(output.len(), 3);
+        assert!(output.section(Some("sec1")).is_some());
+
+        let sec1 = output.section(Some("sec1")).unwrap();
+        assert_eq!(sec1.len(), 2);
+        let key1: String = "key1".into();
+        assert!(sec1.contains_key(&key1));
+        let key2: String = "key2".into();
+        assert!(sec1.contains_key(&key2));
+        let val1: String = "val1".into();
+        assert_eq!(sec1[&key1], val1);
+        let val2: String = "377".into();
+        assert_eq!(sec1[&key2], val2);
+    }
+
+    #[test]
+    #[cfg(feature = "brackets-in-section-names")]
+    fn load_from_str_with_valid_input() {
+        let input = "[sec1]\nkey1=val1\nkey2=377\n[sec2]\nfoo=bar\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let output = opt.unwrap();
+        // there is always a general section
+        assert_eq!(output.len(), 3);
+        assert!(output.section(Some("sec1")).is_some());
+
+        let sec1 = output.section(Some("sec1")).unwrap();
+        assert_eq!(sec1.len(), 2);
+        let key1: String = "key1".into();
+        assert!(sec1.contains_key(&key1));
+        let key2: String = "key2".into();
+        assert!(sec1.contains_key(&key2));
+        let val1: String = "val1".into();
+        assert_eq!(sec1[&key1], val1);
+        let val2: String = "377".into();
+        assert_eq!(sec1[&key2], val2);
+    }
+
+    #[test]
+    #[cfg(not(feature = "brackets-in-section-names"))]
+    fn load_from_str_without_ending_newline() {
+        let input = "[sec1]\nkey1=val1\nkey2=377\n[sec2]foo=bar";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+    }
+
+    #[test]
+    #[cfg(feature = "brackets-in-section-names")]
+    fn load_from_str_without_ending_newline() {
+        let input = "[sec1]\nkey1=val1\nkey2=377\n[sec2]\nfoo=bar";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+    }
+
+    #[test]
+    fn parse_error_numbers() {
+        let invalid_input = "\n\\x";
+        let ini = Ini::load_from_str_opt(
+            invalid_input,
+            ParseOption {
+                enabled_escape: true,
+                ..Default::default()
+            },
+        );
+        assert!(ini.is_err());
+
+        let err = ini.unwrap_err();
+        assert_eq!(err.line, 2);
+        assert_eq!(err.col, 3);
+    }
+
+    #[test]
+    fn parse_comment() {
+        let input = "; abcdefghijklmn\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+    }
+
+    #[cfg(not(feature = "inline-comment"))]
+    #[test]
+    fn inline_comment_not_supported() {
+        let input = "
+[section name]
+name = hello # abcdefg
+gender = mail ; abdddd
+";
+        let ini = 
Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "name").unwrap(), "hello # abcdefg"); + assert_eq!(ini.get_from(Some("section name"), "gender").unwrap(), "mail ; abdddd"); + } + + #[test] + #[cfg_attr(not(feature = "inline-comment"), should_panic)] + fn inline_comment() { + let input = " +[section name] # comment in section line +name = hello # abcdefg +gender = mail ; abdddd +address = web#url ;# eeeeee +phone = 01234 # tab before comment +phone2 = 56789 # tab + space before comment +phone3 = 43210 # space + tab before comment +"; + let ini = Ini::load_from_str(input).unwrap(); + println!("{:?}", ini.section(Some("section name"))); + assert_eq!(ini.get_from(Some("section name"), "name").unwrap(), "hello"); + assert_eq!(ini.get_from(Some("section name"), "gender").unwrap(), "mail"); + assert_eq!(ini.get_from(Some("section name"), "address").unwrap(), "web#url"); + assert_eq!(ini.get_from(Some("section name"), "phone").unwrap(), "01234"); + assert_eq!(ini.get_from(Some("section name"), "phone2").unwrap(), "56789"); + assert_eq!(ini.get_from(Some("section name"), "phone3").unwrap(), "43210"); + } + + #[test] + fn sharp_comment() { + let input = " +[section name] +name = hello +# abcdefg +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "name").unwrap(), "hello"); + } + + #[test] + fn iter() { + let input = " +[section name] +name = hello # abcdefg +gender = mail ; abdddd +"; + let mut ini = Ini::load_from_str(input).unwrap(); + + for _ in &mut ini {} + for _ in &ini {} + // for _ in ini {} + } + + #[test] + fn colon() { + let input = " +[section name] +name: hello +gender : mail +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "name").unwrap(), "hello"); + assert_eq!(ini.get_from(Some("section name"), "gender").unwrap(), "mail"); + } + + #[test] + fn string() { + let input = " +[section name] +# This is a comment +Key = \"Value\" +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "Key").unwrap(), "Value"); + } + + #[test] + fn string_multiline() { + let input = " +[section name] +# This is a comment +Key = \"Value +Otherline\" +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "Key").unwrap(), "Value\nOtherline"); + } + + #[test] + fn string_comment() { + let input = " +[section name] +# This is a comment +Key = \"Value # This is not a comment ; at all\" +Stuff = Other +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!( + ini.get_from(Some("section name"), "Key").unwrap(), + "Value # This is not a comment ; at all" + ); + } + + #[test] + fn string_single() { + let input = " +[section name] +# This is a comment +Key = 'Value' +Stuff = Other +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "Key").unwrap(), "Value"); + } + + #[test] + fn string_includes_quote() { + let input = " +[Test] +Comment[tr]=İnternet'e erişin +Comment[uk]=Доступ до Інтернету +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("Test"), "Comment[tr]").unwrap(), "İnternet'e erişin"); + } + + #[test] + fn string_single_multiline() { + let input = " +[section name] +# This is a comment +Key = 'Value +Otherline' +Stuff = Other +"; + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(ini.get_from(Some("section name"), "Key").unwrap(), "Value\nOtherline"); + } + + #[test] + fn 
string_single_comment() {
+        let input = "
+[section name]
+# This is a comment
+Key = 'Value # This is not a comment ; at all'
+";
+        let ini = Ini::load_from_str(input).unwrap();
+        assert_eq!(
+            ini.get_from(Some("section name"), "Key").unwrap(),
+            "Value # This is not a comment ; at all"
+        );
+    }
+
+    #[test]
+    fn load_from_str_with_valid_empty_input() {
+        let input = "key1=\nkey2=val2\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let output = opt.unwrap();
+        assert_eq!(output.len(), 1);
+        assert!(output.section(None::<String>).is_some());
+
+        let sec1 = output.section(None::<String>).unwrap();
+        assert_eq!(sec1.len(), 2);
+        let key1: String = "key1".into();
+        assert!(sec1.contains_key(&key1));
+        let key2: String = "key2".into();
+        assert!(sec1.contains_key(&key2));
+        let val1: String = "".into();
+        assert_eq!(sec1[&key1], val1);
+        let val2: String = "val2".into();
+        assert_eq!(sec1[&key2], val2);
+    }
+
+    #[test]
+    fn load_from_str_with_crlf() {
+        let input = "key1=val1\r\nkey2=val2\r\n";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let output = opt.unwrap();
+        assert_eq!(output.len(), 1);
+        assert!(output.section(None::<String>).is_some());
+        let sec1 = output.section(None::<String>).unwrap();
+        assert_eq!(sec1.len(), 2);
+        let key1: String = "key1".into();
+        assert!(sec1.contains_key(&key1));
+        let key2: String = "key2".into();
+        assert!(sec1.contains_key(&key2));
+        let val1: String = "val1".into();
+        assert_eq!(sec1[&key1], val1);
+        let val2: String = "val2".into();
+        assert_eq!(sec1[&key2], val2);
+    }
+
+    #[test]
+    fn load_from_str_with_cr() {
+        let input = "key1=val1\rkey2=val2\r";
+        let opt = Ini::load_from_str(input);
+        assert!(opt.is_ok());
+
+        let output = opt.unwrap();
+        assert_eq!(output.len(), 1);
+        assert!(output.section(None::<String>).is_some());
+        let sec1 = output.section(None::<String>).unwrap();
+        assert_eq!(sec1.len(), 2);
+        let key1: String = "key1".into();
+        assert!(sec1.contains_key(&key1));
+        let key2: String = "key2".into();
+        assert!(sec1.contains_key(&key2));
+        let val1: String = "val1".into();
+        assert_eq!(sec1[&key1], val1);
+        let val2: String = "val2".into();
+        assert_eq!(sec1[&key2], val2);
+    }
+
+    #[test]
+    #[cfg(not(feature = "brackets-in-section-names"))]
+    fn load_from_file_with_bom() {
+        let file_name = temp_dir().join("rust_ini_load_from_file_with_bom");
+
+        let file_content = b"\xEF\xBB\xBF[Test]Key=Value\n";
+
+        {
+            let mut file = File::create(&file_name).expect("create");
+            file.write_all(file_content).expect("write");
+        }
+
+        let ini = Ini::load_from_file(&file_name).unwrap();
+        assert_eq!(ini.get_from(Some("Test"), "Key"), Some("Value"));
+    }
+
+    #[test]
+    #[cfg(feature = "brackets-in-section-names")]
+    fn load_from_file_with_bom() {
+        let file_name = temp_dir().join("rust_ini_load_from_file_with_bom");
+
+        let file_content = b"\xEF\xBB\xBF[Test]\nKey=Value\n";
+
+        {
+            let mut file = File::create(&file_name).expect("create");
+            file.write_all(file_content).expect("write");
+        }
+
+        let ini = Ini::load_from_file(&file_name).unwrap();
+        assert_eq!(ini.get_from(Some("Test"), "Key"), Some("Value"));
+    }
+
+    #[test]
+    #[cfg(not(feature = "brackets-in-section-names"))]
+    fn load_from_file_without_bom() {
+        let file_name = temp_dir().join("rust_ini_load_from_file_without_bom");
+
+        let file_content = b"[Test]Key=Value\n";
+
+        {
+            let mut file = File::create(&file_name).expect("create");
+            file.write_all(file_content).expect("write");
+        }
+
+        let ini = Ini::load_from_file(&file_name).unwrap();
+        assert_eq!(ini.get_from(Some("Test"), "Key"), Some("Value"));
+    }
+
+    #[test]
+    #[cfg(feature = "brackets-in-section-names")]
+    fn load_from_file_without_bom() {
+        let file_name = temp_dir().join("rust_ini_load_from_file_without_bom");
+
+        let file_content = b"[Test]\nKey=Value\n";
+
+        {
+            let mut file = File::create(&file_name).expect("create");
+            file.write_all(file_content).expect("write");
+        }
+
+        let ini = Ini::load_from_file(&file_name).unwrap();
+        assert_eq!(ini.get_from(Some("Test"), "Key"), Some("Value"));
+    }
+
+    #[test]
+    fn get_with_non_static_key() {
+        let input = "key1=val1\nkey2=val2\n";
+        let opt = Ini::load_from_str(input).unwrap();
+
+        let sec1 = opt.section(None::<String>).unwrap();
+
+        let key = "key1".to_owned();
+        sec1.get(&key).unwrap();
+    }
+
+    #[test]
+    fn load_from_str_noescape() {
+        let input = "path=C:\\Windows\\Some\\Folder\\";
+        let opt = Ini::load_from_str_noescape(input);
+        assert!(opt.is_ok());
+
+        let output = opt.unwrap();
+        assert_eq!(output.len(), 1);
+        let sec = output.section(None::<String>).unwrap();
+        assert_eq!(sec.len(), 1);
+        assert!(sec.contains_key("path"));
+        assert_eq!(&sec["path"], "C:\\Windows\\Some\\Folder\\");
+    }
+
+    #[test]
+    fn partial_quoting_double() {
+        let input = "
+[Section]
+A=\"quote\" arg0
+B=b";
+
+        let opt = Ini::load_from_str(input).unwrap();
+        let sec = opt.section(Some("Section")).unwrap();
+        assert_eq!(&sec["A"], "quote arg0");
+        assert_eq!(&sec["B"], "b");
+    }
+
+    #[test]
+    fn partial_quoting_single() {
+        let input = "
+[Section]
+A='quote' arg0
+B=b";
+
+        let opt = Ini::load_from_str(input).unwrap();
+        let sec = opt.section(Some("Section")).unwrap();
+        assert_eq!(&sec["A"], "quote arg0");
+        assert_eq!(&sec["B"], "b");
+    }
+
+    #[test]
+    fn parse_without_quote() {
+        let input = "
+[Desktop Entry]
+Exec = \"/path/to/exe with space\" arg
+";
+
+        let opt = Ini::load_from_str_opt(
+            input,
+            ParseOption {
+                enabled_quote: false,
+                ..ParseOption::default()
+            },
+        )
+        .unwrap();
+        let sec = opt.section(Some("Desktop Entry")).unwrap();
+        assert_eq!(&sec["Exec"], "\"/path/to/exe with space\" arg");
+    }
+
+    #[test]
+    #[cfg(feature = "case-insensitive")]
+    fn case_insensitive() {
+        let input = "
+[SecTION]
+KeY=value
+";
+
+        let ini = Ini::load_from_str(input).unwrap();
+        let section = ini.section(Some("section")).unwrap();
+        let val = section.get("key").unwrap();
+        assert_eq!("value", val);
+    }
+
+    #[test]
+    fn preserve_order_section() {
+        let input = r"
+none2 = n2
+[SB]
+p2 = 2
+[SA]
+x2 = 2
+[SC]
+cd1 = x
+[xC]
+xd = x
+    ";
+
+        let data = Ini::load_from_str(input).unwrap();
+        let keys: Vec<Option<&str>> = data.iter().map(|(k, _)| k).collect();
+
+        assert_eq!(keys.len(), 5);
+        assert_eq!(keys[0], None);
+        assert_eq!(keys[1], Some("SB"));
+        assert_eq!(keys[2], Some("SA"));
+        assert_eq!(keys[3], Some("SC"));
+        assert_eq!(keys[4], Some("xC"));
+    }
+
+    #[test]
+    fn preserve_order_property() {
+        let input = r"
+x2 = n2
+x1 = n2
+x3 = n2
+";
+        let data = Ini::load_from_str(input).unwrap();
+        let section = data.general_section();
+        let keys: Vec<&str> = section.iter().map(|(k, _)| k).collect();
+        assert_eq!(keys, vec!["x2", "x1", "x3"]);
+    }
+
+    #[test]
+    fn preserve_order_property_in_section() {
+        let input = r"
+[s]
+x2 = n2
+xb = n2
+a3 = n3
+";
+        let data = Ini::load_from_str(input).unwrap();
+        let section = data.section(Some("s")).unwrap();
+        let keys: Vec<&str> = section.iter().map(|(k, _)| k).collect();
+        assert_eq!(keys, vec!["x2", "xb", "a3"])
+    }
+
+    #[test]
+    fn preserve_order_write() {
+        let input = r"
+x2 = n2
+x1 = n2
+x3 = n2
+[s]
+x2 = n2
+xb = n2
+a3 = n3
+";
+        let data = Ini::load_from_str(input).unwrap();
+
let mut buf = vec![]; + data.write_to(&mut buf).unwrap(); + let new_data = Ini::load_from_str(&String::from_utf8(buf).unwrap()).unwrap(); + + let sec0 = new_data.general_section(); + let keys0: Vec<&str> = sec0.iter().map(|(k, _)| k).collect(); + assert_eq!(keys0, vec!["x2", "x1", "x3"]); + + let sec1 = new_data.section(Some("s")).unwrap(); + let keys1: Vec<&str> = sec1.iter().map(|(k, _)| k).collect(); + assert_eq!(keys1, vec!["x2", "xb", "a3"]); + } + + #[test] + fn write_new() { + use std::str; + + let ini = Ini::new(); + + let opt = WriteOption { + line_separator: LineSeparator::CR, + ..Default::default() + }; + let mut buf = Vec::new(); + ini.write_to_opt(&mut buf, opt).unwrap(); + + assert_eq!("", str::from_utf8(&buf).unwrap()); + } + + #[test] + fn write_line_separator() { + use std::str; + + let mut ini = Ini::new(); + ini.with_section(Some("Section1")) + .set("Key1", "Value") + .set("Key2", "Value"); + ini.with_section(Some("Section2")) + .set("Key1", "Value") + .set("Key2", "Value"); + + { + let mut buf = Vec::new(); + ini.write_to_opt( + &mut buf, + WriteOption { + line_separator: LineSeparator::CR, + ..Default::default() + }, + ) + .unwrap(); + + assert_eq!( + "[Section1]\nKey1=Value\nKey2=Value\n\n[Section2]\nKey1=Value\nKey2=Value\n", + str::from_utf8(&buf).unwrap() + ); + } + + { + let mut buf = Vec::new(); + ini.write_to_opt( + &mut buf, + WriteOption { + line_separator: LineSeparator::CRLF, + ..Default::default() + }, + ) + .unwrap(); + + assert_eq!( + "[Section1]\r\nKey1=Value\r\nKey2=Value\r\n\r\n[Section2]\r\nKey1=Value\r\nKey2=Value\r\n", + str::from_utf8(&buf).unwrap() + ); + } + + { + let mut buf = Vec::new(); + ini.write_to_opt( + &mut buf, + WriteOption { + line_separator: LineSeparator::SystemDefault, + ..Default::default() + }, + ) + .unwrap(); + + if cfg!(windows) { + assert_eq!( + "[Section1]\r\nKey1=Value\r\nKey2=Value\r\n\r\n[Section2]\r\nKey1=Value\r\nKey2=Value\r\n", + str::from_utf8(&buf).unwrap() + ); + } else { + assert_eq!( + "[Section1]\nKey1=Value\nKey2=Value\n\n[Section2]\nKey1=Value\nKey2=Value\n", + str::from_utf8(&buf).unwrap() + ); + } + } + } + + #[test] + fn write_kv_separator() { + use std::str; + + let mut ini = Ini::new(); + ini.with_section(Some("Section1")) + .set("Key1", "Value") + .set("Key2", "Value"); + ini.with_section(Some("Section2")) + .set("Key1", "Value") + .set("Key2", "Value"); + + let mut buf = Vec::new(); + ini.write_to_opt( + &mut buf, + WriteOption { + kv_separator: " = ", + ..Default::default() + }, + ) + .unwrap(); + + // Test different line endings in Windows and Unix + if cfg!(windows) { + assert_eq!( + "[Section1]\r\nKey1 = Value\r\nKey2 = Value\r\n\r\n[Section2]\r\nKey1 = Value\r\nKey2 = Value\r\n", + str::from_utf8(&buf).unwrap() + ); + } else { + assert_eq!( + "[Section1]\nKey1 = Value\nKey2 = Value\n\n[Section2]\nKey1 = Value\nKey2 = Value\n", + str::from_utf8(&buf).unwrap() + ); + } + } + + #[test] + fn duplicate_sections() { + // https://github.com/zonyitoo/rust-ini/issues/49 + + let input = r" +[Peer] +foo = a +bar = b + +[Peer] +foo = c +bar = d + +[Peer] +foo = e +bar = f +"; + + let ini = Ini::load_from_str(input).unwrap(); + assert_eq!(3, ini.section_all(Some("Peer")).count()); + + let mut iter = ini.iter(); + // there is always an empty general section + let (k0, p0) = iter.next().unwrap(); + assert_eq!(None, k0); + assert!(p0.is_empty()); + let (k1, p1) = iter.next().unwrap(); + assert_eq!(Some("Peer"), k1); + assert_eq!(Some("a"), p1.get("foo")); + assert_eq!(Some("b"), p1.get("bar")); + let (k2, p2) = 
iter.next().unwrap(); + assert_eq!(Some("Peer"), k2); + assert_eq!(Some("c"), p2.get("foo")); + assert_eq!(Some("d"), p2.get("bar")); + let (k3, p3) = iter.next().unwrap(); + assert_eq!(Some("Peer"), k3); + assert_eq!(Some("e"), p3.get("foo")); + assert_eq!(Some("f"), p3.get("bar")); + + assert_eq!(None, iter.next()); + } + + #[test] + fn new_has_empty_general_section() { + let mut ini = Ini::new(); + + assert!(ini.general_section().is_empty()); + assert!(ini.general_section_mut().is_empty()); + assert_eq!(ini.len(), 1); + } + + #[test] + fn fix_issue63() { + let section = "PHP"; + let key = "engine"; + let value = "On"; + let new_value = "Off"; + + // create a new configuration + let mut conf = Ini::new(); + conf.with_section(Some(section)).set(key, value); + + // assert the value is the one expected + let v = conf.get_from(Some(section), key).unwrap(); + assert_eq!(v, value); + + // update the section/key with a new value + conf.set_to(Some(section), key.to_string(), new_value.to_string()); + + // assert the new value was set + let v = conf.get_from(Some(section), key).unwrap(); + assert_eq!(v, new_value); + } + + #[test] + fn fix_issue64() { + let input = format!("some-key=åäö{}", super::DEFAULT_LINE_SEPARATOR); + + let conf = Ini::load_from_str(&input).unwrap(); + + let mut output = Vec::new(); + conf.write_to_policy(&mut output, EscapePolicy::Basics).unwrap(); + + assert_eq!(input, String::from_utf8(output).unwrap()); + } + + #[test] + fn invalid_codepoint() { + use std::io::Cursor; + + let d = vec![ + 10, 8, 68, 8, 61, 10, 126, 126, 61, 49, 10, 62, 8, 8, 61, 10, 91, 93, 93, 36, 91, 61, 10, 75, 91, 10, 10, + 10, 61, 92, 120, 68, 70, 70, 70, 70, 70, 126, 61, 10, 0, 0, 61, 10, 38, 46, 49, 61, 0, 39, 0, 0, 46, 92, + 120, 46, 36, 91, 91, 1, 0, 0, 16, 0, 0, 0, 0, 0, 0, + ]; + let mut file = Cursor::new(d); + assert!(Ini::read_from(&mut file).is_err()); + } + + #[test] + #[cfg(feature = "brackets-in-section-names")] + fn fix_issue84() { + let input = " +[[*]] +a = b +c = d +"; + let ini = Ini::load_from_str(input).unwrap(); + let sect = ini.section(Some("[*]")); + assert!(sect.is_some()); + assert!(sect.unwrap().contains_key("a")); + assert!(sect.unwrap().contains_key("c")); + } + + #[test] + #[cfg(feature = "brackets-in-section-names")] + fn fix_issue84_brackets_inside() { + let input = " +[a[b]c] +a = b +c = d +"; + let ini = Ini::load_from_str(input).unwrap(); + let sect = ini.section(Some("a[b]c")); + assert!(sect.is_some()); + assert!(sect.unwrap().contains_key("a")); + assert!(sect.unwrap().contains_key("c")); + } + + #[test] + #[cfg(feature = "brackets-in-section-names")] + fn fix_issue84_whitespaces_after_bracket() { + let input = " +[[*]]\t\t +a = b +c = d +"; + let ini = Ini::load_from_str(input).unwrap(); + let sect = ini.section(Some("[*]")); + assert!(sect.is_some()); + assert!(sect.unwrap().contains_key("a")); + assert!(sect.unwrap().contains_key("c")); + } + + #[test] + #[cfg(feature = "brackets-in-section-names")] + fn fix_issue84_not_whitespaces_after_bracket() { + let input = " +[[*]]xx +a = b +c = d +"; + let ini = Ini::load_from_str(input); + assert!(ini.is_err()); + } + + #[test] + fn escape_str_nothing_policy() { + let test_str = "\0\x07\n字'\"✨🍉杓"; + // This policy should never escape anything. 
+ let policy = EscapePolicy::Nothing; + assert_eq!(escape_str(test_str, policy), test_str); + } + + #[test] + fn escape_str_basics() { + let test_backslash = r"\backslashes\"; + let test_nul = "string with \x00nulls\x00 in it"; + let test_controls = "|\x07| bell, |\x08| backspace, |\x7f| delete, |\x1b| escape"; + let test_whitespace = "\t \r\n"; + + assert_eq!(escape_str(test_backslash, EscapePolicy::Nothing), test_backslash); + assert_eq!(escape_str(test_nul, EscapePolicy::Nothing), test_nul); + assert_eq!(escape_str(test_controls, EscapePolicy::Nothing), test_controls); + assert_eq!(escape_str(test_whitespace, EscapePolicy::Nothing), test_whitespace); + + for policy in vec![ + EscapePolicy::Basics, EscapePolicy::BasicsUnicode, EscapePolicy::BasicsUnicodeExtended, + EscapePolicy::Reserved, EscapePolicy::ReservedUnicode, EscapePolicy::ReservedUnicodeExtended, + EscapePolicy::Everything, + ] { + assert_eq!(escape_str(test_backslash, policy), r"\\backslashes\\"); + assert_eq!(escape_str(test_nul, policy), r"string with \0nulls\0 in it"); + assert_eq!(escape_str(test_controls, policy), r"|\a| bell, |\b| backspace, |\x007f| delete, |\x001b| escape"); + assert_eq!(escape_str(test_whitespace, policy), r"\t \r\n"); + } + } + + #[test] + fn escape_str_reserved() { + // Test reserved characters. + let test_reserved = ":=;#"; + // And characters which are *not* reserved, but look like they might be. + let test_punctuation = "!@$%^&*()-_+/?.>,<[]{}``"; + + // These policies should *not* escape reserved characters. + for policy in vec![ + EscapePolicy::Nothing, + EscapePolicy::Basics, EscapePolicy::BasicsUnicode, EscapePolicy::BasicsUnicodeExtended, + ] { + assert_eq!(escape_str(test_reserved, policy), ":=;#"); + assert_eq!(escape_str(test_punctuation, policy), test_punctuation); + } + + // These should. + for policy in vec![ + EscapePolicy::Reserved, EscapePolicy::ReservedUnicodeExtended, EscapePolicy::ReservedUnicode, + EscapePolicy::Everything, + ] { + assert_eq!(escape_str(test_reserved, policy), r"\:\=\;\#"); + assert_eq!(escape_str(test_punctuation, policy), "!@$%^&*()-_+/?.>,<[]{}``"); + } + } + + #[test] + fn escape_str_unicode() { + // Test unicode escapes. + // The first are Basic Multilingual Plane (BMP) characters - i.e. <= U+FFFF + // Emoji are above U+FFFF (e.g. in the 1F???? range), and the CJK characters are in the U+20???? range. + // The last one is for codepoints at the edge of Rust's char type. + let test_unicode = r"é£∳字✨"; + let test_emoji = r"🐱😉"; + let test_cjk = r"𠈌𠕇"; + let test_high_points = "\u{10ABCD}\u{10FFFF}"; + + let policy = EscapePolicy::Nothing; + assert_eq!(escape_str(test_unicode, policy), test_unicode); + assert_eq!(escape_str(test_emoji, policy), test_emoji); + assert_eq!(escape_str(test_high_points, policy), test_high_points); + + // The "Unicode" policies should escape standard BMP unicode, but should *not* escape emoji or supplementary CJK codepoints. + // The Basics/Reserved policies should behave identically in this regard. + for policy in vec![EscapePolicy::BasicsUnicode, EscapePolicy::ReservedUnicode] { + assert_eq!(escape_str(test_unicode, policy), r"\x00e9\x00a3\x2233\x5b57\x2728"); + assert_eq!(escape_str(test_emoji, policy), test_emoji); + assert_eq!(escape_str(test_cjk, policy), test_cjk); + assert_eq!(escape_str(test_high_points, policy), test_high_points); + } + + // UnicodeExtended policies should escape both BMP and supplementary plane characters. 
+ for policy in vec![EscapePolicy::BasicsUnicodeExtended, EscapePolicy::ReservedUnicodeExtended] { + assert_eq!(escape_str(test_unicode, policy), r"\x00e9\x00a3\x2233\x5b57\x2728"); + assert_eq!(escape_str(test_emoji, policy), r"\x1f431\x1f609"); + assert_eq!(escape_str(test_cjk, policy), r"\x2020c\x20547"); + assert_eq!(escape_str(test_high_points, policy), r"\x10abcd\x10ffff"); + } + } +} diff --git a/vendor/tiny-keccak/.cargo-checksum.json b/vendor/tiny-keccak/.cargo-checksum.json new file mode 100644 index 0000000..ac4cbd6 --- /dev/null +++ b/vendor/tiny-keccak/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.lock":"21610c43af26728947615128c38e407843f350ed88097aa01f6208bee9325100","Cargo.toml":"e8591a61b0fca0a73e34485db425e0fba0130a1ef31365e4b46ff16a7cdcb37a","LICENSE":"a2010f343487d3f7618affe54f789f5487602331c0a8d03f49e9a7c547cf0499","README.md":"44668b11e891dfdb78974e86ec42eefd45d44499715a3d83bd4a2200b892bc7d","benches/kangaroo.rs":"60e3eb7249dfd3985d49dabc726d0613cfaf9f3daf3b1d3ccbf47697733cb651","benches/keccak.rs":"7885e55621f95430638fbeb44ff4d347ba39e273a85547b3fbd42a11b1f45f12","build.rs":"c3e63c57ab0d28633f522c4a056c3230e9399fda6e511954da81ebfd00b216b2","examples/sha3.rs":"4502cd021e73fcfcb366f69dd6e808222a82148e5a5fe4b6bc2f7716ddcbb709","src/cshake.rs":"55428e5dd7acce4b5b8f3942ef193238381c1372fe0d5574a147cf24e1d7062b","src/k12.rs":"e1890350a70e51abf3e11b671cc1f32ec50e26ee0275166b71a0b087446731d9","src/keccak.rs":"e56adff13f8a55c2acfd1d6eaffdec8387870d85d4bffda96e9501da925b2af5","src/keccakf.rs":"318bddc0302a16a02b7bf85c02f4f02a29792c3c9581aba6946ccaa5c2034416","src/keccakp.rs":"90aae4a198381a8775b49309008b0ab2a1d892b8a5aac7cefa5e08a0c3923716","src/kmac.rs":"489805ef15caec95395fa0f4e31e51b40968ba43be155b24301a7f786ad06965","src/lib.rs":"c14112176bb359d24486f141a36370fe3a894c4dc7cdb952718a7509c93e424a","src/parallel_hash.rs":"2c91c75c8497de422dab69cfce762d3188a1ff1af604df20987058615ea18c23","src/sha3.rs":"e72aab9e536438114049f590a47c6df6250f0fffb6664dbcb85a87a151fb7e65","src/shake.rs":"dce5e46f5fda03abd42faecaa2b6d5ccc0abfd53aff59a67d63dec828fc02e2b","src/tuple_hash.rs":"2d5946a1095e9d046c6f74419c611d692cc74cfe354221d1ae48972828cd8609","tests/cshake.rs":"39193ab67320a3104bb73b4a3e83f1c516a845fdbed1ddfe714dcf6972a1ca68","tests/kangaroo.rs":"3a8c426f84317daecc6f82577a44cb74fd34311f29b5fa8db9b01817fc8ae262","tests/keccak.rs":"3577c04cc74e1c037d8cb1c03445ad6a9cf831d124451a97b2e66121b9140447","tests/kmac.rs":"1e4028cea536ebc3a52feecb99016f3389e6523c5f1ee5251476bc6d4de4e1ef","tests/parallel_hash.rs":"e067e3e9a2e28a191cfca5e507cfb18680fd35c89932aa7f1f744169bb23217c","tests/sha3.rs":"553c9b44954cd577fe5ca305ee340f3dd189c8f43c3cd949a223d9181ce4203c","tests/shake.rs":"fd1db640f3da13ec3e5dae1f37314f02cc147f7e874ea1a8f80370f2c7080a3a","tests/tuple_hash.rs":"36c80c3c6f5aa5acf22e2eed5bee8bc2c47d33412ae6c7418be4e6b6d9fd0a6a"},"package":"2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"} \ No newline at end of file diff --git a/vendor/tiny-keccak/Cargo.toml b/vendor/tiny-keccak/Cargo.toml new file mode 100644 index 0000000..97c780e --- /dev/null +++ b/vendor/tiny-keccak/Cargo.toml @@ -0,0 +1,93 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies +# +# If you believe there's an error in this file please file an +# issue against the 
rust-lang/cargo repository. If you're +# editing this file be aware that the upstream Cargo.toml +# will likely look very different (and much more reasonable) + +[package] +edition = "2018" +name = "tiny-keccak" +version = "2.0.2" +authors = ["debris "] +description = "An implementation of Keccak derived functions." +homepage = "https://github.com/debris/tiny-keccak" +keywords = ["hash", "sha3", "keccak", "crypto", "kangarootwelve"] +categories = ["cryptography", "no-std"] +license = "CC0-1.0" +[package.metadata.docs.rs] +all-features = true +[profile.dev] +opt-level = 3 +debug = false + +[profile.test] +opt-level = 3 +debug = false + +[[example]] +name = "sha3" +required-features = ["sha3"] + +[[test]] +name = "keccak" +required-features = ["keccak"] + +[[test]] +name = "cshake" +required-features = ["cshake"] + +[[test]] +name = "tuple_hash" +required-features = ["tuple_hash"] + +[[test]] +name = "kangaroo" +required-features = ["k12"] + +[[test]] +name = "sha3" +required-features = ["sha3"] + +[[test]] +name = "shake" +required-features = ["shake"] + +[[test]] +name = "kmac" +required-features = ["kmac"] + +[[test]] +name = "parallel_hash" +required-features = ["parallel_hash"] + +[[bench]] +name = "keccak" +required-features = ["keccak"] + +[[bench]] +name = "kangaroo" +required-features = ["k12"] +[dependencies.crunchy] +version = "0.2.2" + +[features] +cshake = [] +default = [] +fips202 = ["keccak", "shake", "sha3"] +k12 = [] +keccak = [] +kmac = ["cshake"] +parallel_hash = ["cshake"] +sha3 = [] +shake = [] +sp800 = ["cshake", "kmac", "tuple_hash"] +tuple_hash = ["cshake"] +[badges.travis-ci] +branch = "master" +repository = "debris/tiny-keccak" diff --git a/vendor/tiny-keccak/LICENSE b/vendor/tiny-keccak/LICENSE new file mode 100644 index 0000000..0e259d4 --- /dev/null +++ b/vendor/tiny-keccak/LICENSE @@ -0,0 +1,121 @@ +Creative Commons Legal Code + +CC0 1.0 Universal + + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. 
+ +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+    surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+    warranties of any kind concerning the Work, express, implied,
+    statutory or otherwise, including without limitation warranties of
+    title, merchantability, fitness for a particular purpose, non
+    infringement, or the absence of latent or other defects, accuracy, or
+    the present or absence of errors, whether or not discoverable, all to
+    the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+    that may apply to the Work or any use thereof, including without
+    limitation any person's Copyright and Related Rights in the Work.
+    Further, Affirmer disclaims responsibility for obtaining any necessary
+    consents, permissions or other rights required for any use of the
+    Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+    party to this document and has no duty or obligation with respect to
+    this CC0 or use of the Work.
diff --git a/vendor/tiny-keccak/README.md b/vendor/tiny-keccak/README.md
new file mode 100644
index 0000000..323169a
--- /dev/null
+++ b/vendor/tiny-keccak/README.md
@@ -0,0 +1,70 @@
+# tiny-keccak
+
+An implementation of Keccak derived functions specified in [`FIPS-202`], [`SP800-185`] and [`KangarooTwelve`].
+
+[![Build Status][travis-image]][travis-url]
+
+[travis-image]: https://travis-ci.org/debris/tiny-keccak.svg?branch=master
+[travis-url]: https://travis-ci.org/debris/tiny-keccak
+[`FIPS-202`]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+[`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+[`KangarooTwelve`]: https://eprint.iacr.org/2016/770.pdf
+
+[`Documentation`](https://docs.rs/tiny-keccak)
+
+The `Keccak-f[1600]` permutation is fully unrolled; it's nearly as fast
+as the Keccak team's optimized permutation.
+
+## Usage
+
+In your `Cargo.toml` specify which features (hash functions) you intend to use.
+Available options are: `cshake`, `fips202`, `k12`, `keccak`, `kmac`, `parallel_hash`, `sha3`,
+`shake`, `sp800`, `tuple_hash`.
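+
+Some features are umbrellas that enable several others at once; judging from
+this crate's `Cargo.toml`, `fips202` turns on `keccak`, `shake` and `sha3`
+together, and `sp800` turns on `cshake`, `kmac` and `tuple_hash`:
+
+```toml
+[dependencies]
+tiny-keccak = { version = "2.0", features = ["fips202"] }
+```
+
+More commonly you enable just the single function you need: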
+ +```toml +[dependencies] +tiny-keccak = { version = "2.0", features = ["sha3"] } +``` + +## Example + +```rust +use tiny_keccak::Sha3; + +fn main() { + let mut sha3 = Sha3::v256(); + let mut output = [0u8; 32]; + let expected = b"\ + \x64\x4b\xcc\x7e\x56\x43\x73\x04\x09\x99\xaa\xc8\x9e\x76\x22\xf3\ + \xca\x71\xfb\xa1\xd9\x72\xfd\x94\xa3\x1c\x3b\xfb\xf2\x4e\x39\x38\ + "; + + sha3.update(b"hello"); + sha3.update(b" "); + sha3.update(b"world"); + sha3.finalize(&mut output); + + assert_eq!(expected, &output); +} +``` + +## Benchmarks + +Benchmarked with [rust-crypto](https://github.com/RustCrypto) sha3 on: + +``` +MacBook Pro (Retina, 15-inch, Mid 2015) +2,5 GHz Intel Core i7 +16 GB 1600 MHz DDR3 +Intel Iris Pro 1536 MB +``` + +Benchmark code is available [here](https://github.com/debris/tiny-keccak/blob/master/comparison/benches/sha3.rs) + +``` +running 4 tests +test rust_crypto_sha3_256_input_32_bytes ... bench: 677 ns/iter (+/- 113) = 47 MB/s +test rust_crypto_sha3_256_input_4096_bytes ... bench: 17,619 ns/iter (+/- 4,174) = 232 MB/s +test tiny_keccak_sha3_256_input_32_bytes ... bench: 569 ns/iter (+/- 204) = 56 MB/s +test tiny_keccak_sha3_256_input_4096_bytes ... bench: 17,185 ns/iter (+/- 4,575) = 238 MB/s +``` diff --git a/vendor/tiny-keccak/benches/kangaroo.rs b/vendor/tiny-keccak/benches/kangaroo.rs new file mode 100644 index 0000000..45b69b1 --- /dev/null +++ b/vendor/tiny-keccak/benches/kangaroo.rs @@ -0,0 +1,19 @@ +#![feature(test)] + +extern crate test; + +use test::Bencher; +use tiny_keccak::{KangarooTwelve, Hasher}; + +#[bench] +fn bench_k12(b: &mut Bencher) { + let data = [0u8; 32]; + b.bytes = data.len() as u64; + + b.iter(|| { + let mut res = [0u8; 32]; + let mut k12 = KangarooTwelve::new(&[]); + k12.update(&data); + k12.finalize(&mut res); + }); +} diff --git a/vendor/tiny-keccak/benches/keccak.rs b/vendor/tiny-keccak/benches/keccak.rs new file mode 100644 index 0000000..7804c12 --- /dev/null +++ b/vendor/tiny-keccak/benches/keccak.rs @@ -0,0 +1,43 @@ +#![feature(test)] + +extern crate test; + +use test::Bencher; +use tiny_keccak::{keccakf, Keccak, Hasher}; + +#[bench] +fn bench_keccak_256_input_4096_bytes(b: &mut Bencher) { + let data = [254u8; 4096]; + b.bytes = data.len() as u64; + + b.iter(|| { + let mut res: [u8; 32] = [0; 32]; + let mut keccak = Keccak::v256(); + keccak.update(&data); + keccak.finalize(&mut res); + }); +} + +#[bench] +fn keccakf_u64(b: &mut Bencher) { + const WORDS: usize = 25; + b.bytes = (WORDS * 8) as u64; + + b.iter(|| { + let mut data = [0u64; WORDS]; + keccakf(&mut data); + }); +} + +#[bench] +fn bench_keccak256(b: &mut Bencher) { + let data = [0u8; 32]; + b.bytes = data.len() as u64; + + b.iter(|| { + let mut res: [u8; 32] = [0; 32]; + let mut keccak = Keccak::v256(); + keccak.update(&data); + keccak.finalize(&mut res); + }); +} diff --git a/vendor/tiny-keccak/build.rs b/vendor/tiny-keccak/build.rs new file mode 100644 index 0000000..80d0be2 --- /dev/null +++ b/vendor/tiny-keccak/build.rs @@ -0,0 +1,22 @@ +#[cfg(not(any( + feature = "keccak", + feature = "shake", + feature = "sha3", + feature = "cshake", + feature = "kmac", + feature = "tuple_hash", + feature = "parallel_hash", + feature = "k12", + feature = "fips202", + feature = "sp800" +)))] +compile_error!( + "You need to specify at least one hash function you intend to use. 
Available options:\n\
+    keccak, shake, sha3, cshake, kmac, tuple_hash, parallel_hash, k12, fips202, sp800\n\
+    e.g.\n\
+    tiny-keccak = { version = \"2.0.0\", features = [\"sha3\"] }"
+);
+
+fn main() {
+}
diff --git a/vendor/tiny-keccak/examples/sha3.rs b/vendor/tiny-keccak/examples/sha3.rs
new file mode 100644
index 0000000..bfdf799
--- /dev/null
+++ b/vendor/tiny-keccak/examples/sha3.rs
@@ -0,0 +1,17 @@
+use tiny_keccak::{Hasher, Sha3};
+
+fn main() {
+    let mut sha3 = Sha3::v256();
+    let mut output = [0; 32];
+    let expected = b"\
+        \x64\x4b\xcc\x7e\x56\x43\x73\x04\x09\x99\xaa\xc8\x9e\x76\x22\xf3\
+        \xca\x71\xfb\xa1\xd9\x72\xfd\x94\xa3\x1c\x3b\xfb\xf2\x4e\x39\x38\
+    ";
+
+    sha3.update(b"hello");
+    sha3.update(b" ");
+    sha3.update(b"world");
+    sha3.finalize(&mut output);
+
+    assert_eq!(expected, &output);
+}
diff --git a/vendor/tiny-keccak/src/cshake.rs b/vendor/tiny-keccak/src/cshake.rs
new file mode 100644
index 0000000..d635e5b
--- /dev/null
+++ b/vendor/tiny-keccak/src/cshake.rs
@@ -0,0 +1,77 @@
+//! The `cSHAKE` extendable-output functions defined in [`SP800-185`].
+//!
+//! [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+
+use crate::{bits_to_rate, keccakf::KeccakF, left_encode, Hasher, KeccakState, Xof};
+
+/// The `cSHAKE` extendable-output functions defined in [`SP800-185`].
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["cshake"] }
+/// ```
+///
+/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+#[derive(Clone)]
+pub struct CShake {
+    state: KeccakState<KeccakF>,
+}
+
+impl CShake {
+    const DELIM: u8 = 0x04;
+
+    /// Creates new [`CShake`] hasher with a security level of 128 bits.
+    ///
+    /// [`CShake`]: struct.CShake.html
+    pub fn v128(name: &[u8], custom_string: &[u8]) -> CShake {
+        CShake::new(name, custom_string, 128)
+    }
+
+    /// Creates new [`CShake`] hasher with a security level of 256 bits.
+    ///
+    /// [`CShake`]: struct.CShake.html
+    pub fn v256(name: &[u8], custom_string: &[u8]) -> CShake {
+        CShake::new(name, custom_string, 256)
+    }
+
+    pub(crate) fn new(name: &[u8], custom_string: &[u8], bits: usize) -> CShake {
+        let rate = bits_to_rate(bits);
+        // if there is no name and no customization string
+        // cSHAKE is SHAKE
+        if name.is_empty() && custom_string.is_empty() {
+            let state = KeccakState::new(rate, 0x1f);
+            return CShake { state };
+        }
+
+        let mut state = KeccakState::new(rate, Self::DELIM);
+        state.update(left_encode(rate).value());
+        state.update(left_encode(name.len() * 8).value());
+        state.update(name);
+        state.update(left_encode(custom_string.len() * 8).value());
+        state.update(custom_string);
+        state.fill_block();
+        CShake { state }
+    }
+
+    pub(crate) fn fill_block(&mut self) {
+        self.state.fill_block();
+    }
+}
+
+impl Hasher for CShake {
+    fn update(&mut self, input: &[u8]) {
+        self.state.update(input);
+    }
+
+    fn finalize(self, output: &mut [u8]) {
+        self.state.finalize(output);
+    }
+}
+
+impl Xof for CShake {
+    fn squeeze(&mut self, output: &mut [u8]) {
+        self.state.squeeze(output);
+    }
+}
diff --git a/vendor/tiny-keccak/src/k12.rs b/vendor/tiny-keccak/src/k12.rs
new file mode 100644
index 0000000..a205d3d
--- /dev/null
+++ b/vendor/tiny-keccak/src/k12.rs
@@ -0,0 +1,160 @@
+//! The `KangarooTwelve` hash function defined [`here`].
+//!
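+//! A minimal fixed-output sketch (the `finalize` path; the XOF example
+//! further down shows incremental squeezing):
+//!
+//! ```
+//! # use tiny_keccak::{Hasher, KangarooTwelve};
+//! let mut k12 = KangarooTwelve::new(b"");
+//! let mut output = [0u8; 32];
+//! k12.update(b"hello world");
+//! k12.finalize(&mut output);
+//! ```
+//!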
[`here`]: https://eprint.iacr.org/2016/770.pdf
+
+use crate::{bits_to_rate, keccakp::KeccakP, EncodedLen, Hasher, IntoXof, KeccakState, Xof};
+
+fn encode_len(len: usize) -> EncodedLen {
+    let len_view = (len as u64).to_be_bytes();
+    let offset = len_view.iter().position(|i| *i != 0).unwrap_or(8);
+    let mut buffer = [0u8; 9];
+    buffer[..8].copy_from_slice(&len_view);
+    buffer[8] = 8 - offset as u8;
+
+    EncodedLen { offset, buffer }
+}
+
+/// The `KangarooTwelve` hash function defined [`here`].
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["k12"] }
+/// ```
+///
+/// [`here`]: https://eprint.iacr.org/2016/770.pdf
+#[derive(Clone)]
+pub struct KangarooTwelve<T> {
+    state: KeccakState<KeccakP>,
+    current_chunk: KeccakState<KeccakP>,
+    custom_string: Option<T>,
+    written: usize,
+    chunks: usize,
+}
+
+impl<T> KangarooTwelve<T> {
+    const MAX_CHUNK_SIZE: usize = 8192;
+
+    /// Creates new [`KangarooTwelve`] hasher with a security level of 128 bits.
+    ///
+    /// [`KangarooTwelve`]: struct.KangarooTwelve.html
+    pub fn new(custom_string: T) -> Self {
+        let rate = bits_to_rate(128);
+        KangarooTwelve {
+            state: KeccakState::new(rate, 0),
+            current_chunk: KeccakState::new(rate, 0x0b),
+            custom_string: Some(custom_string),
+            written: 0,
+            chunks: 0,
+        }
+    }
+}
+
+impl<T: AsRef<[u8]>> Hasher for KangarooTwelve<T> {
+    fn update(&mut self, input: &[u8]) {
+        let mut to_absorb = input;
+        if self.chunks == 0 {
+            let todo = core::cmp::min(Self::MAX_CHUNK_SIZE - self.written, to_absorb.len());
+            self.state.update(&to_absorb[..todo]);
+            self.written += todo;
+            to_absorb = &to_absorb[todo..];
+
+            if to_absorb.len() > 0 && self.written == Self::MAX_CHUNK_SIZE {
+                self.state.update(&[0x03, 0, 0, 0, 0, 0, 0, 0]);
+                self.written = 0;
+                self.chunks += 1;
+            }
+        }
+
+        while to_absorb.len() > 0 {
+            if self.written == Self::MAX_CHUNK_SIZE {
+                let mut chunk_hash = [0u8; 32];
+                let current_chunk = self.current_chunk.clone();
+                self.current_chunk.reset();
+                current_chunk.finalize(&mut chunk_hash);
+                self.state.update(&chunk_hash);
+                self.written = 0;
+                self.chunks += 1;
+            }
+
+            let todo = core::cmp::min(Self::MAX_CHUNK_SIZE - self.written, to_absorb.len());
+            self.current_chunk.update(&to_absorb[..todo]);
+            self.written += todo;
+            to_absorb = &to_absorb[todo..];
+        }
+    }
+
+    fn finalize(self, output: &mut [u8]) {
+        let mut xof = self.into_xof();
+        xof.squeeze(output);
+    }
+}
+
+/// The `KangarooTwelve` extendable-output function defined [`here`].
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["k12"] }
+/// ```
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::{KangarooTwelve, Xof, IntoXof, Hasher};
+/// let input = b"hello world";
+/// let mut output = [0u8; 64];
+/// let mut hasher = KangarooTwelve::new(b"");
+/// hasher.update(input);
+/// let mut xof = hasher.into_xof();
+/// xof.squeeze(&mut output[..32]);
+/// xof.squeeze(&mut output[32..]);
+/// ```
+///
+/// ---
+///
+/// [`KangarooTwelveXof`] can be created only by using [`KangarooTwelve::IntoXof`] interface.
+///
+/// [`here`]: https://eprint.iacr.org/2016/770.pdf
+/// [`KangarooTwelveXof`]: struct.KangarooTwelveXof.html
+/// [`KangarooTwelve::IntoXof`]: struct.KangarooTwelve.html#impl-IntoXof
+#[derive(Clone)]
+pub struct KangarooTwelveXof {
+    state: KeccakState<KeccakP>,
+}
+
+impl<T: AsRef<[u8]>> IntoXof for KangarooTwelve<T> {
+    type Xof = KangarooTwelveXof;
+
+    fn into_xof(mut self) -> KangarooTwelveXof {
+        let custom_string = self
+            .custom_string
+            .take()
+            .expect("KangarooTwelve cannot be initialized without custom_string; qed");
+        let encoded_len = encode_len(custom_string.as_ref().len());
+        self.update(custom_string.as_ref());
+        self.update(encoded_len.value());
+
+        if self.chunks == 0 {
+            self.state.delim = 0x07;
+        } else {
+            let encoded_chunks = encode_len(self.chunks);
+            let mut tmp_chunk = [0u8; 32];
+            self.current_chunk.finalize(&mut tmp_chunk);
+            self.state.update(&tmp_chunk);
+            self.state.update(encoded_chunks.value());
+            self.state.update(&[0xff, 0xff]);
+            self.state.delim = 0x06;
+        }
+
+        KangarooTwelveXof { state: self.state }
+    }
+}
+
+impl Xof for KangarooTwelveXof {
+    fn squeeze(&mut self, output: &mut [u8]) {
+        self.state.squeeze(output);
+    }
+}
diff --git a/vendor/tiny-keccak/src/keccak.rs b/vendor/tiny-keccak/src/keccak.rs
new file mode 100644
index 0000000..a7db0b8
--- /dev/null
+++ b/vendor/tiny-keccak/src/keccak.rs
@@ -0,0 +1,93 @@
+//! The `Keccak` hash functions.
+
+use super::{bits_to_rate, keccakf::KeccakF, Hasher, KeccakState};
+
+/// The `Keccak` hash functions defined in [`Keccak SHA3 submission`].
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["keccak"] }
+/// ```
+///
+/// [`Keccak SHA3 submission`]: https://keccak.team/files/Keccak-submission-3.pdf
+#[derive(Clone)]
+pub struct Keccak {
+    state: KeccakState<KeccakF>,
+}
+
+impl Keccak {
+    const DELIM: u8 = 0x01;
+
+    /// Creates new [`Keccak`] hasher with a security level of 224 bits.
+    ///
+    /// [`Keccak`]: struct.Keccak.html
+    pub fn v224() -> Keccak {
+        Keccak::new(224)
+    }
+
+    /// Creates new [`Keccak`] hasher with a security level of 256 bits.
+    ///
+    /// [`Keccak`]: struct.Keccak.html
+    pub fn v256() -> Keccak {
+        Keccak::new(256)
+    }
+
+    /// Creates new [`Keccak`] hasher with a security level of 384 bits.
+    ///
+    /// [`Keccak`]: struct.Keccak.html
+    pub fn v384() -> Keccak {
+        Keccak::new(384)
+    }
+
+    /// Creates new [`Keccak`] hasher with a security level of 512 bits.
+    ///
+    /// [`Keccak`]: struct.Keccak.html
+    pub fn v512() -> Keccak {
+        Keccak::new(512)
+    }
+
+    fn new(bits: usize) -> Keccak {
+        Keccak {
+            state: KeccakState::new(bits_to_rate(bits), Self::DELIM),
+        }
+    }
+}
+
+impl Hasher for Keccak {
+    /// Absorb additional input. Can be called multiple times.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// # use tiny_keccak::{Hasher, Keccak};
+    /// #
+    /// # fn main() {
+    /// # let mut keccak = Keccak::v256();
+    /// keccak.update(b"hello");
+    /// keccak.update(b" world");
+    /// # }
+    /// ```
+    fn update(&mut self, input: &[u8]) {
+        self.state.update(input);
+    }
+
+    /// Pad and squeeze the state to the output.
+ /// + /// # Example + /// + /// ``` + /// # use tiny_keccak::{Hasher, Keccak}; + /// # + /// # fn main() { + /// # let keccak = Keccak::v256(); + /// # let mut output = [0u8; 32]; + /// keccak.finalize(&mut output); + /// # } + /// # + /// ``` + fn finalize(self, output: &mut [u8]) { + self.state.finalize(output); + } +} diff --git a/vendor/tiny-keccak/src/keccakf.rs b/vendor/tiny-keccak/src/keccakf.rs new file mode 100644 index 0000000..ac9ab50 --- /dev/null +++ b/vendor/tiny-keccak/src/keccakf.rs @@ -0,0 +1,40 @@ +use crate::{Buffer, Permutation}; + +const ROUNDS: usize = 24; + +const RC: [u64; ROUNDS] = [ + 1u64, + 0x8082u64, + 0x800000000000808au64, + 0x8000000080008000u64, + 0x808bu64, + 0x80000001u64, + 0x8000000080008081u64, + 0x8000000000008009u64, + 0x8au64, + 0x88u64, + 0x80008009u64, + 0x8000000au64, + 0x8000808bu64, + 0x800000000000008bu64, + 0x8000000000008089u64, + 0x8000000000008003u64, + 0x8000000000008002u64, + 0x8000000000000080u64, + 0x800au64, + 0x800000008000000au64, + 0x8000000080008081u64, + 0x8000000000008080u64, + 0x80000001u64, + 0x8000000080008008u64, +]; + +keccak_function!("`keccak-f[1600, 24]`", keccakf, ROUNDS, RC); + +pub struct KeccakF; + +impl Permutation for KeccakF { + fn execute(buffer: &mut Buffer) { + keccakf(buffer.words()); + } +} diff --git a/vendor/tiny-keccak/src/keccakp.rs b/vendor/tiny-keccak/src/keccakp.rs new file mode 100644 index 0000000..f747d49 --- /dev/null +++ b/vendor/tiny-keccak/src/keccakp.rs @@ -0,0 +1,28 @@ +use crate::{Buffer, Permutation}; + +const ROUNDS: usize = 12; + +const RC: [u64; ROUNDS] = [ + 0x000000008000808b, + 0x800000000000008b, + 0x8000000000008089, + 0x8000000000008003, + 0x8000000000008002, + 0x8000000000000080, + 0x000000000000800a, + 0x800000008000000a, + 0x8000000080008081, + 0x8000000000008080, + 0x0000000080000001, + 0x8000000080008008, +]; + +keccak_function!("`keccak-p[1600, 12]`", keccakp, ROUNDS, RC); + +pub struct KeccakP; + +impl Permutation for KeccakP { + fn execute(buffer: &mut Buffer) { + keccakp(buffer.words()); + } +} diff --git a/vendor/tiny-keccak/src/kmac.rs b/vendor/tiny-keccak/src/kmac.rs new file mode 100644 index 0000000..d3741c7 --- /dev/null +++ b/vendor/tiny-keccak/src/kmac.rs @@ -0,0 +1,114 @@ +use crate::{bits_to_rate, left_encode, right_encode, CShake, Hasher, IntoXof, Xof}; + +/// The `KMAC` pseudo-random functions defined in [`SP800-185`]. +/// +/// The KECCAK Message Authentication Code (`KMAC`) algorithm is a `PRF` and keyed hash function based +/// on KECCAK. It provides variable-length output, and unlike [`SHAKE`] and [`cSHAKE`], altering the +/// requested output length generates a new, unrelated output. KMAC has two variants, [`KMAC128`] and +/// [`KMAC256`], built from [`cSHAKE128`] and [`cSHAKE256`], respectively. The two variants differ somewhat in +/// their technical security properties. +/// +/// # Usage +/// +/// ```toml +/// [dependencies] +/// tiny-keccak = { version = "2.0.0", features = ["kmac"] } +/// ``` +/// +/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf +/// [`KMAC128`]: struct.Kmac.html#method.v128 +/// [`KMAC256`]: struct.Kmac.html#method.v256 +/// [`SHAKE`]: struct.Shake.html +/// [`cSHAKE`]: struct.CShake.html +/// [`cSHAKE128`]: struct.CShake.html#method.v128 +/// [`cSHAKE256`]: struct.CShake.html#method.v256 +#[derive(Clone)] +pub struct Kmac { + state: CShake, +} + +impl Kmac { + /// Creates new [`Kmac`] hasher with a security level of 128 bits. 
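+    ///
+    /// The key may have any length: it is length-prefixed with `left_encode`
+    /// and padded to a full rate block before any message data is absorbed.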
+    ///
+    /// [`Kmac`]: struct.Kmac.html
+    pub fn v128(key: &[u8], custom_string: &[u8]) -> Kmac {
+        Kmac::new(key, custom_string, 128)
+    }
+
+    /// Creates new [`Kmac`] hasher with a security level of 256 bits.
+    ///
+    /// [`Kmac`]: struct.Kmac.html
+    pub fn v256(key: &[u8], custom_string: &[u8]) -> Kmac {
+        Kmac::new(key, custom_string, 256)
+    }
+
+    fn new(key: &[u8], custom_string: &[u8], bits: usize) -> Kmac {
+        let rate = bits_to_rate(bits);
+        let mut state = CShake::new(b"KMAC", custom_string, bits);
+        state.update(left_encode(rate).value());
+        state.update(left_encode(key.len() * 8).value());
+        state.update(key);
+        state.fill_block();
+        Kmac { state }
+    }
+}
+
+impl Hasher for Kmac {
+    fn update(&mut self, input: &[u8]) {
+        self.state.update(input)
+    }
+
+    fn finalize(mut self, output: &mut [u8]) {
+        self.state.update(right_encode(output.len() * 8).value());
+        self.state.finalize(output)
+    }
+}
+
+/// The `KMACXOF` extendable-output functions defined in [`SP800-185`].
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["kmac"] }
+/// ```
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::{Kmac, Xof, IntoXof, Hasher};
+/// let input = b"hello world";
+/// let mut output = [0u8; 64];
+/// let mut kmac = Kmac::v256(b"", b"");
+/// kmac.update(input);
+/// let mut xof = kmac.into_xof();
+/// xof.squeeze(&mut output[..32]);
+/// xof.squeeze(&mut output[32..]);
+/// ```
+///
+/// ---
+///
+/// [`KmacXof`] can be created only by using [`Kmac::IntoXof`] interface.
+///
+/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+/// [`KmacXof`]: struct.KmacXof.html
+/// [`Kmac::IntoXof`]: struct.Kmac.html#impl-IntoXof
+#[derive(Clone)]
+pub struct KmacXof {
+    state: CShake,
+}
+
+impl IntoXof for Kmac {
+    type Xof = KmacXof;
+
+    fn into_xof(mut self) -> Self::Xof {
+        self.state.update(right_encode(0).value());
+        KmacXof { state: self.state }
+    }
+}
+
+impl Xof for KmacXof {
+    fn squeeze(&mut self, output: &mut [u8]) {
+        self.state.squeeze(output)
+    }
+}
diff --git a/vendor/tiny-keccak/src/lib.rs b/vendor/tiny-keccak/src/lib.rs
new file mode 100644
index 0000000..9329fd3
--- /dev/null
+++ b/vendor/tiny-keccak/src/lib.rs
@@ -0,0 +1,501 @@
+//! Keccak derived functions specified in [`FIPS-202`], [`SP800-185`] and [`KangarooTwelve`].
+//!
+//! # Example
+//!
+//! ```
+//! # use tiny_keccak::Hasher;
+//! #
+//! # fn foo<H: Hasher>(mut hasher: H) {
+//! let input_a = b"hello world";
+//! let input_b = b"!";
+//! let mut output = [0u8; 32];
+//! hasher.update(input_a);
+//! hasher.update(input_b);
+//! hasher.finalize(&mut output);
+//! # }
+//! ```
+//!
+//! # Credits
+//!
+//! - [`coruus/keccak-tiny`] for C implementation of keccak function
+//! - [`@quininer`] for `no-std` support and rust implementation [`SP800-185`]
+//! - [`mimoo/GoKangarooTwelve`] for GO implementation of `KangarooTwelve`
+//! - [`@Vurich`] for optimizations
+//! - [`@oleganza`] for adding support for half-duplex use
+//!
+//! # License
+//!
+//! [`CC0`]. Attribution kindly requested. Blame taken too,
+//! but not liability.
+//!
+//! [`FIPS-202`]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+//! [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+//! [`KangarooTwelve`]: https://eprint.iacr.org/2016/770.pdf
+//! [`coruus/keccak-tiny`]: https://github.com/coruus/keccak-tiny
+//! [`mimoo/GoKangarooTwelve`]: https://github.com/mimoo/GoKangarooTwelve
+//!
[`@quininer`]: https://github.com/quininer
+//! [`@Vurich`]: https://github.com/Vurich
+//! [`@oleganza`]: https://github.com/oleganza
+//! [`CC0`]: https://github.com/debris/tiny-keccak/blob/master/LICENSE

+#![no_std]
+#![deny(missing_docs)]
+
+const RHO: [u32; 24] = [
+    1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, 18, 39, 61, 20, 44,
+];
+
+const PI: [usize; 24] = [
+    10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, 14, 22, 9, 6, 1,
+];
+
+const WORDS: usize = 25;
+
+macro_rules! keccak_function {
+    ($doc: expr, $name: ident, $rounds: expr, $rc: expr) => {
+        #[doc = $doc]
+        #[allow(unused_assignments)]
+        #[allow(non_upper_case_globals)]
+        pub fn $name(a: &mut [u64; $crate::WORDS]) {
+            use crunchy::unroll;
+
+            for i in 0..$rounds {
+                let mut array: [u64; 5] = [0; 5];
+
+                // Theta
+                unroll! {
+                    for x in 0..5 {
+                        unroll! {
+                            for y_count in 0..5 {
+                                let y = y_count * 5;
+                                array[x] ^= a[x + y];
+                            }
+                        }
+                    }
+                }
+
+                unroll! {
+                    for x in 0..5 {
+                        unroll! {
+                            for y_count in 0..5 {
+                                let y = y_count * 5;
+                                a[y + x] ^= array[(x + 4) % 5] ^ array[(x + 1) % 5].rotate_left(1);
+                            }
+                        }
+                    }
+                }
+
+                // Rho and pi
+                let mut last = a[1];
+                unroll! {
+                    for x in 0..24 {
+                        array[0] = a[$crate::PI[x]];
+                        a[$crate::PI[x]] = last.rotate_left($crate::RHO[x]);
+                        last = array[0];
+                    }
+                }
+
+                // Chi
+                unroll! {
+                    for y_step in 0..5 {
+                        let y = y_step * 5;
+
+                        unroll! {
+                            for x in 0..5 {
+                                array[x] = a[y + x];
+                            }
+                        }
+
+                        unroll! {
+                            for x in 0..5 {
+                                a[y + x] = array[x] ^ ((!array[(x + 1) % 5]) & (array[(x + 2) % 5]));
+                            }
+                        }
+                    }
+                };
+
+                // Iota
+                a[0] ^= $rc[i];
+            }
+        }
+    }
+}
+
+#[cfg(feature = "k12")]
+mod keccakp;
+
+#[cfg(feature = "k12")]
+pub use keccakp::keccakp;
+
+#[cfg(any(
+    feature = "keccak",
+    feature = "shake",
+    feature = "sha3",
+    feature = "cshake",
+    feature = "kmac",
+    feature = "tuple_hash",
+    feature = "parallel_hash"
+))]
+mod keccakf;
+
+#[cfg(any(
+    feature = "keccak",
+    feature = "shake",
+    feature = "sha3",
+    feature = "cshake",
+    feature = "kmac",
+    feature = "tuple_hash",
+    feature = "parallel_hash"
+))]
+pub use keccakf::keccakf;
+
+#[cfg(feature = "k12")]
+mod k12;
+
+#[cfg(feature = "k12")]
+pub use k12::{KangarooTwelve, KangarooTwelveXof};
+
+#[cfg(feature = "keccak")]
+mod keccak;
+
+#[cfg(feature = "keccak")]
+pub use keccak::Keccak;
+
+#[cfg(feature = "shake")]
+mod shake;
+
+#[cfg(feature = "shake")]
+pub use shake::Shake;
+
+#[cfg(feature = "sha3")]
+mod sha3;
+
+#[cfg(feature = "sha3")]
+pub use sha3::Sha3;
+
+#[cfg(feature = "cshake")]
+mod cshake;
+
+#[cfg(feature = "cshake")]
+pub use cshake::CShake;
+
+#[cfg(feature = "kmac")]
+mod kmac;
+
+#[cfg(feature = "kmac")]
+pub use kmac::{Kmac, KmacXof};
+
+#[cfg(feature = "tuple_hash")]
+mod tuple_hash;
+
+#[cfg(feature = "tuple_hash")]
+pub use tuple_hash::{TupleHash, TupleHashXof};
+
+#[cfg(feature = "parallel_hash")]
+mod parallel_hash;
+
+#[cfg(feature = "parallel_hash")]
+pub use parallel_hash::{ParallelHash, ParallelHashXof};
+
+/// A trait for hashing an arbitrary stream of bytes.
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::Hasher;
+/// #
+/// # fn foo<H: Hasher>(mut hasher: H) {
+/// let input_a = b"hello world";
+/// let input_b = b"!";
+/// let mut output = [0u8; 32];
+/// hasher.update(input_a);
+/// hasher.update(input_b);
+/// hasher.finalize(&mut output);
+/// # }
+/// ```
+pub trait Hasher {
+    /// Absorb additional input. Can be called multiple times.
+    fn update(&mut self, input: &[u8]);
+
+    /// Pad and squeeze the state to the output.
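+    ///
+    /// Note that `finalize` consumes the hasher. For variable-length or
+    /// repeated output, convert the hasher with [`IntoXof`] and call
+    /// [`Xof::squeeze`] instead.
+    ///
+    /// [`IntoXof`]: trait.IntoXof.html
+    /// [`Xof::squeeze`]: trait.Xof.html#tymethod.squeeze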
fn finalize(self, output: &mut [u8]);
+}
+
+/// A trait used to convert [`Hasher`] into its [`Xof`] counterpart.
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::IntoXof;
+/// #
+/// # fn foo<H: IntoXof>(hasher: H) {
+/// let xof = hasher.into_xof();
+/// # }
+/// ```
+///
+/// [`Hasher`]: trait.Hasher.html
+/// [`Xof`]: trait.Xof.html
+pub trait IntoXof {
+    /// A type implementing [`Xof`], eXtendable-output function interface.
+    ///
+    /// [`Xof`]: trait.Xof.html
+    type Xof: Xof;
+
+    /// A method used to convert type into [`Xof`].
+    ///
+    /// [`Xof`]: trait.Xof.html
+    fn into_xof(self) -> Self::Xof;
+}
+
+/// Extendable-output function (`XOF`) is a function on bit strings in which the output can be
+/// extended to any desired length.
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::Xof;
+/// #
+/// # fn foo<X: Xof>(mut xof: X) {
+/// let mut output = [0u8; 64];
+/// xof.squeeze(&mut output[0..32]);
+/// xof.squeeze(&mut output[32..]);
+/// # }
+/// ```
+pub trait Xof {
+    /// A method used to retrieve another part of hash function output.
+    fn squeeze(&mut self, output: &mut [u8]);
+}
+
+struct EncodedLen {
+    offset: usize,
+    buffer: [u8; 9],
+}
+
+impl EncodedLen {
+    fn value(&self) -> &[u8] {
+        &self.buffer[self.offset..]
+    }
+}
+
+fn left_encode(len: usize) -> EncodedLen {
+    let mut buffer = [0u8; 9];
+    buffer[1..].copy_from_slice(&(len as u64).to_be_bytes());
+    let offset = buffer.iter().position(|i| *i != 0).unwrap_or(8);
+    buffer[offset - 1] = 9 - offset as u8;
+
+    EncodedLen {
+        offset: offset - 1,
+        buffer,
+    }
+}
+
+fn right_encode(len: usize) -> EncodedLen {
+    let mut buffer = [0u8; 9];
+    buffer[..8].copy_from_slice(&(len as u64).to_be_bytes());
+    let offset = buffer.iter().position(|i| *i != 0).unwrap_or(7);
+    buffer[8] = 8 - offset as u8;
+    EncodedLen { offset, buffer }
+}
+
+#[derive(Default, Clone)]
+struct Buffer([u64; WORDS]);
+
+impl Buffer {
+    fn words(&mut self) -> &mut [u64; WORDS] {
+        &mut self.0
+    }
+
+    #[cfg(target_endian = "little")]
+    #[inline]
+    fn execute<F: FnOnce(&mut [u8])>(&mut self, offset: usize, len: usize, f: F) {
+        let buffer: &mut [u8; WORDS * 8] = unsafe { core::mem::transmute(&mut self.0) };
+        f(&mut buffer[offset..][..len]);
+    }
+
+    #[cfg(target_endian = "big")]
+    #[inline]
+    fn execute<F: FnOnce(&mut [u8])>(&mut self, offset: usize, len: usize, f: F) {
+        fn swap_endianess(buffer: &mut [u64]) {
+            for item in buffer {
+                *item = item.swap_bytes();
+            }
+        }
+
+        let start = offset / 8;
+        let end = (offset + len + 7) / 8;
+        swap_endianess(&mut self.0[start..end]);
+        let buffer: &mut [u8; WORDS * 8] = unsafe { core::mem::transmute(&mut self.0) };
+        f(&mut buffer[offset..][..len]);
+        swap_endianess(&mut self.0[start..end]);
+    }
+
+    fn setout(&mut self, dst: &mut [u8], offset: usize, len: usize) {
+        self.execute(offset, len, |buffer| dst[..len].copy_from_slice(buffer));
+    }
+
+    fn xorin(&mut self, src: &[u8], offset: usize, len: usize) {
+        self.execute(offset, len, |dst| {
+            assert!(dst.len() <= src.len());
+            let len = dst.len();
+            let mut dst_ptr = dst.as_mut_ptr();
+            let mut src_ptr = src.as_ptr();
+            for _ in 0..len {
+                unsafe {
+                    *dst_ptr ^= *src_ptr;
+                    src_ptr = src_ptr.offset(1);
+                    dst_ptr = dst_ptr.offset(1);
+                }
+            }
+        });
+    }
+
+    fn pad(&mut self, offset: usize, delim: u8, rate: usize) {
+        self.execute(offset, 1, |buff| buff[0] ^= delim);
+        self.execute(rate - 1, 1, |buff| buff[0] ^= 0x80);
+    }
+}
+
+trait Permutation {
+    fn execute(a: &mut Buffer);
+}
+
+#[derive(Clone, Copy)]
+enum Mode {
+    Absorbing,
+    Squeezing,
+}
+
+struct KeccakState<P> {
+    buffer: Buffer,
+    offset: usize,
+    rate: usize,
+    delim: u8,
+    mode: Mode,
+    permutation: core::marker::PhantomData
<P>,
+}
+
+impl<P> Clone for KeccakState<P> {
+    fn clone(&self) -> Self {
+        KeccakState {
+            buffer: self.buffer.clone(),
+            offset: self.offset,
+            rate: self.rate,
+            delim: self.delim,
+            mode: self.mode,
+            permutation: core::marker::PhantomData,
+        }
+    }
+}
+
+impl<P: Permutation> KeccakState<P>
{ + fn new(rate: usize, delim: u8) -> Self { + assert!(rate != 0, "rate cannot be equal 0"); + KeccakState { + buffer: Buffer::default(), + offset: 0, + rate, + delim, + mode: Mode::Absorbing, + permutation: core::marker::PhantomData, + } + } + + fn keccak(&mut self) { + P::execute(&mut self.buffer); + } + + fn update(&mut self, input: &[u8]) { + if let Mode::Squeezing = self.mode { + self.mode = Mode::Absorbing; + self.fill_block(); + } + + //first foldp + let mut ip = 0; + let mut l = input.len(); + let mut rate = self.rate - self.offset; + let mut offset = self.offset; + while l >= rate { + self.buffer.xorin(&input[ip..], offset, rate); + self.keccak(); + ip += rate; + l -= rate; + rate = self.rate; + offset = 0; + } + + self.buffer.xorin(&input[ip..], offset, l); + self.offset = offset + l; + } + + fn pad(&mut self) { + self.buffer.pad(self.offset, self.delim, self.rate); + } + + fn squeeze(&mut self, output: &mut [u8]) { + if let Mode::Absorbing = self.mode { + self.mode = Mode::Squeezing; + self.pad(); + self.fill_block(); + } + + // second foldp + let mut op = 0; + let mut l = output.len(); + let mut rate = self.rate - self.offset; + let mut offset = self.offset; + while l >= rate { + self.buffer.setout(&mut output[op..], offset, rate); + self.keccak(); + op += rate; + l -= rate; + rate = self.rate; + offset = 0; + } + + self.buffer.setout(&mut output[op..], offset, l); + self.offset = offset + l; + } + + fn finalize(mut self, output: &mut [u8]) { + self.squeeze(output); + } + + fn fill_block(&mut self) { + self.keccak(); + self.offset = 0; + } + + fn reset(&mut self) { + self.buffer = Buffer::default(); + self.offset = 0; + self.mode = Mode::Absorbing; + } +} + +fn bits_to_rate(bits: usize) -> usize { + 200 - bits / 4 +} + +#[cfg(test)] +mod tests { + use crate::{left_encode, right_encode}; + + #[test] + fn test_left_encode() { + assert_eq!(left_encode(0).value(), &[1, 0]); + assert_eq!(left_encode(128).value(), &[1, 128]); + assert_eq!(left_encode(65536).value(), &[3, 1, 0, 0]); + assert_eq!(left_encode(4096).value(), &[2, 16, 0]); + assert_eq!(left_encode(54321).value(), &[2, 212, 49]); + } + + #[test] + fn test_right_encode() { + assert_eq!(right_encode(0).value(), &[0, 1]); + assert_eq!(right_encode(128).value(), &[128, 1]); + assert_eq!(right_encode(65536).value(), &[1, 0, 0, 3]); + assert_eq!(right_encode(4096).value(), &[16, 0, 2]); + assert_eq!(right_encode(54321).value(), &[212, 49, 2]); + } +} diff --git a/vendor/tiny-keccak/src/parallel_hash.rs b/vendor/tiny-keccak/src/parallel_hash.rs new file mode 100644 index 0000000..cc45818 --- /dev/null +++ b/vendor/tiny-keccak/src/parallel_hash.rs @@ -0,0 +1,206 @@ +use crate::{left_encode, right_encode, CShake, Hasher, IntoXof, Xof}; + +#[derive(Clone)] +struct UnfinishedState { + state: CShake, + absorbed: usize, +} + +struct Suboutout { + state: [u8; 64], + size: usize, +} + +impl Suboutout { + fn security(bits: usize) -> Suboutout { + Suboutout { + state: [0u8; 64], + // 128 => 32, 256 => 64 + size: bits / 4, + } + } + + #[inline] + fn as_bytes(&self) -> &[u8] { + &self.state[..self.size] + } + + #[inline] + fn as_bytes_mut(&mut self) -> &mut [u8] { + &mut self.state[..self.size] + } +} + +/// The `ParallelHash` hash functions defined in [`SP800-185`]. +/// +/// The purpose of `ParallelHash` is to support the efficient hashing of very long strings, by +/// taking advantage of the parallelism available in modern processors. 
`ParallelHash` supports the +/// [`128-bit`] and [`256-bit`] security strengths, and also provides variable-length output. +/// +/// # Usage +/// +/// ```toml +/// [dependencies] +/// tiny-keccak = { version = "2.0.0", features = ["parallel_hash"] } +/// ``` +/// +/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf +/// [`128-bit`]: struct.ParallelHash.html#method.v128 +/// [`256-bit`]: struct.ParallelHash.html#method.v256 +#[derive(Clone)] +pub struct ParallelHash { + state: CShake, + block_size: usize, + bits: usize, + blocks: usize, + unfinished: Option, +} + +impl ParallelHash { + /// Creates new [`ParallelHash`] hasher with a security level of 128 bits. + /// + /// [`ParallelHash`]: struct.ParallelHash.html + pub fn v128(custom_string: &[u8], block_size: usize) -> ParallelHash { + ParallelHash::new(custom_string, block_size, 128) + } + + /// Creates new [`ParallelHash`] hasher with a security level of 256 bits. + /// + /// [`ParallelHash`]: struct.ParallelHash.html + pub fn v256(custom_string: &[u8], block_size: usize) -> ParallelHash { + ParallelHash::new(custom_string, block_size, 256) + } + + fn new(custom_string: &[u8], block_size: usize, bits: usize) -> ParallelHash { + let mut state = CShake::new(b"ParallelHash", custom_string, bits); + state.update(left_encode(block_size).value()); + ParallelHash { + state, + block_size, + bits, + blocks: 0, + unfinished: None, + } + } +} + +impl Hasher for ParallelHash { + fn update(&mut self, mut input: &[u8]) { + if let Some(mut unfinished) = self.unfinished.take() { + let to_absorb = self.block_size - unfinished.absorbed; + if input.len() >= to_absorb { + unfinished.state.update(&input[..to_absorb]); + input = &input[to_absorb..]; + + let mut suboutput = Suboutout::security(self.bits); + unfinished.state.finalize(suboutput.as_bytes_mut()); + self.state.update(suboutput.as_bytes()); + self.blocks += 1; + } else { + unfinished.state.update(input); + unfinished.absorbed += input.len(); + self.unfinished = Some(unfinished); + return; + } + } + + let bits = self.bits; + let input_blocks_end = input.len() / self.block_size * self.block_size; + let input_blocks = &input[..input_blocks_end]; + let input_end = &input[input_blocks_end..]; + let parts = input_blocks.chunks(self.block_size).map(|chunk| { + let mut state = CShake::new(b"", b"", bits); + state.update(chunk); + let mut suboutput = Suboutout::security(bits); + state.finalize(suboutput.as_bytes_mut()); + suboutput + }); + + for part in parts { + self.state.update(part.as_bytes()); + self.blocks += 1; + } + + if !input_end.is_empty() { + assert!(self.unfinished.is_none()); + let mut state = CShake::new(b"", b"", bits); + state.update(input_end); + self.unfinished = Some(UnfinishedState { + state, + absorbed: input_end.len(), + }); + } + } + + fn finalize(mut self, output: &mut [u8]) { + if let Some(unfinished) = self.unfinished.take() { + let mut suboutput = Suboutout::security(self.bits); + unfinished.state.finalize(suboutput.as_bytes_mut()); + self.state.update(suboutput.as_bytes()); + self.blocks += 1; + } + + self.state.update(right_encode(self.blocks).value()); + self.state.update(right_encode(output.len() * 8).value()); + self.state.finalize(output); + } +} + +/// The `ParallelHashXOF` extendable-output functions defined in [`SP800-185`]. 
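+///
+/// Unlike `finalize`, which right-encodes the requested output length into
+/// the input, `into_xof` right-encodes a length of zero, so the squeezed
+/// stream does not depend on how many bytes are later read. Note also that
+/// this implementation hashes the blocks sequentially; the construction
+/// merely permits parallel evaluation.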
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["parallel_hash"] }
+/// ```
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::{ParallelHash, Xof, IntoXof, Hasher};
+/// let input = b"hello world";
+/// let mut output = [0u8; 64];
+/// let mut hasher = ParallelHash::v256(b"", 8);
+/// hasher.update(input);
+/// let mut xof = hasher.into_xof();
+/// xof.squeeze(&mut output[..32]);
+/// xof.squeeze(&mut output[32..]);
+/// ```
+///
+/// ---
+///
+/// [`ParallelHashXof`] can be created only by using [`ParallelHash::IntoXof`] interface.
+///
+///
+/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+/// [`ParallelHashXof`]: struct.ParallelHashXof.html
+/// [`ParallelHash::IntoXof`]: struct.ParallelHash.html#impl-IntoXof
+#[derive(Clone)]
+pub struct ParallelHashXof {
+    state: CShake,
+}
+
+impl IntoXof for ParallelHash {
+    type Xof = ParallelHashXof;
+
+    fn into_xof(mut self) -> Self::Xof {
+        if let Some(unfinished) = self.unfinished.take() {
+            let mut suboutput = Suboutout::security(self.bits);
+            unfinished.state.finalize(suboutput.as_bytes_mut());
+            self.state.update(suboutput.as_bytes());
+            self.blocks += 1;
+        }
+
+        self.state.update(right_encode(self.blocks).value());
+        self.state.update(right_encode(0).value());
+
+        ParallelHashXof { state: self.state }
+    }
+}
+
+impl Xof for ParallelHashXof {
+    fn squeeze(&mut self, output: &mut [u8]) {
+        self.state.squeeze(output);
+    }
+}
diff --git a/vendor/tiny-keccak/src/sha3.rs b/vendor/tiny-keccak/src/sha3.rs
new file mode 100644
index 0000000..71326a2
--- /dev/null
+++ b/vendor/tiny-keccak/src/sha3.rs
@@ -0,0 +1,83 @@
+use crate::{bits_to_rate, keccakf::KeccakF, Hasher, KeccakState};
+
+/// The `SHA3` hash functions defined in [`FIPS-202`].
+///
+/// [`FIPS-202`]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["sha3"] }
+/// ```
+///
+/// # Example
+///
+/// ```
+/// # use tiny_keccak::{Hasher, Sha3};
+/// #
+/// # fn main() {
+/// let input = b"hello world";
+/// let mut output = [0; 32];
+/// let expected = b"\
+///     \x64\x4b\xcc\x7e\x56\x43\x73\x04\x09\x99\xaa\xc8\x9e\x76\x22\xf3\
+///     \xca\x71\xfb\xa1\xd9\x72\xfd\x94\xa3\x1c\x3b\xfb\xf2\x4e\x39\x38\
+/// ";
+/// let mut sha3 = Sha3::v256();
+/// sha3.update(input);
+/// sha3.finalize(&mut output);
+/// assert_eq!(expected, &output);
+/// # }
+/// ```
+#[derive(Clone)]
+pub struct Sha3 {
+    state: KeccakState<KeccakF>,
+}
+
+impl Sha3 {
+    const DELIM: u8 = 0x06;
+
+    /// Creates new [`Sha3`] hasher with a security level of 224 bits.
+    ///
+    /// [`Sha3`]: struct.Sha3.html
+    pub fn v224() -> Sha3 {
+        Sha3::new(224)
+    }
+
+    /// Creates new [`Sha3`] hasher with a security level of 256 bits.
+    ///
+    /// [`Sha3`]: struct.Sha3.html
+    pub fn v256() -> Sha3 {
+        Sha3::new(256)
+    }
+
+    /// Creates new [`Sha3`] hasher with a security level of 384 bits.
+    ///
+    /// [`Sha3`]: struct.Sha3.html
+    pub fn v384() -> Sha3 {
+        Sha3::new(384)
+    }
+
+    /// Creates new [`Sha3`] hasher with a security level of 512 bits.
+    ///
+    /// [`Sha3`]: struct.Sha3.html
+    pub fn v512() -> Sha3 {
+        Sha3::new(512)
+    }
+
+    fn new(bits: usize) -> Sha3 {
+        Sha3 {
+            state: KeccakState::new(bits_to_rate(bits), Self::DELIM),
+        }
+    }
+}
+
+impl Hasher for Sha3 {
+    fn update(&mut self, input: &[u8]) {
+        self.state.update(input);
+    }
+
+    fn finalize(self, output: &mut [u8]) {
+        self.state.finalize(output);
+    }
+}
diff --git a/vendor/tiny-keccak/src/shake.rs b/vendor/tiny-keccak/src/shake.rs
new file mode 100644
index 0000000..fb9015e
--- /dev/null
+++ b/vendor/tiny-keccak/src/shake.rs
@@ -0,0 +1,56 @@
+use crate::{bits_to_rate, keccakf::KeccakF, Hasher, KeccakState, Xof};
+
+/// The `SHAKE` extendable-output functions defined in [`FIPS-202`].
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["shake"] }
+/// ```
+///
+/// [`FIPS-202`]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
+#[derive(Clone)]
+pub struct Shake {
+    state: KeccakState<KeccakF>,
+}
+
+impl Shake {
+    const DELIM: u8 = 0x1f;
+
+    /// Creates new [`Shake`] hasher with a security level of 128 bits.
+    ///
+    /// [`Shake`]: struct.Shake.html
+    pub fn v128() -> Shake {
+        Shake::new(128)
+    }
+
+    /// Creates new [`Shake`] hasher with a security level of 256 bits.
+    ///
+    /// [`Shake`]: struct.Shake.html
+    pub fn v256() -> Shake {
+        Shake::new(256)
+    }
+
+    pub(crate) fn new(bits: usize) -> Shake {
+        Shake {
+            state: KeccakState::new(bits_to_rate(bits), Self::DELIM),
+        }
+    }
+}
+
+impl Hasher for Shake {
+    fn update(&mut self, input: &[u8]) {
+        self.state.update(input);
+    }
+
+    fn finalize(self, output: &mut [u8]) {
+        self.state.finalize(output);
+    }
+}
+
+impl Xof for Shake {
+    fn squeeze(&mut self, output: &mut [u8]) {
+        self.state.squeeze(output)
+    }
+}
diff --git a/vendor/tiny-keccak/src/tuple_hash.rs b/vendor/tiny-keccak/src/tuple_hash.rs
new file mode 100644
index 0000000..a23ab14
--- /dev/null
+++ b/vendor/tiny-keccak/src/tuple_hash.rs
@@ -0,0 +1,106 @@
+use crate::{left_encode, right_encode, CShake, Hasher, IntoXof, Xof};
+
+/// The `TupleHash` hash functions defined in [`SP800-185`].
+///
+/// `TupleHash` is designed to provide a generic, misuse-resistant way to combine a sequence of
+/// strings for hashing such that, for example, a `TupleHash` computed on the tuple (`"abc"`, `"d"`) will
+/// produce a different hash value than a `TupleHash` computed on the tuple (`"ab"`, `"cd"`), even though
+/// all the remaining input parameters are kept the same, and the two resulting concatenated
+/// strings, without string encoding, are identical.
+///
+/// # Usage
+///
+/// ```toml
+/// [dependencies]
+/// tiny-keccak = { version = "2.0.0", features = ["tuple_hash"] }
+/// ```
+///
+/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf
+#[derive(Clone)]
+pub struct TupleHash {
+    state: CShake,
+}
+
+impl TupleHash {
+    /// Creates new [`TupleHash`] hasher with a security level of 128 bits.
+    ///
+    /// [`TupleHash`]: struct.TupleHash.html
+    pub fn v128(custom_string: &[u8]) -> TupleHash {
+        TupleHash::new(custom_string, 128)
+    }
+
+    /// Creates new [`TupleHash`] hasher with a security level of 256 bits.
+ /// + /// [`TupleHash`]: struct.TupleHash.html + pub fn v256(custom_string: &[u8]) -> TupleHash { + TupleHash::new(custom_string, 256) + } + + fn new(custom_string: &[u8], bits: usize) -> TupleHash { + TupleHash { + state: CShake::new(b"TupleHash", custom_string, bits), + } + } +} + +impl Hasher for TupleHash { + fn update(&mut self, input: &[u8]) { + self.state.update(left_encode(input.len() * 8).value()); + self.state.update(input) + } + + fn finalize(mut self, output: &mut [u8]) { + self.state.update(right_encode(output.len() * 8).value()); + self.state.finalize(output) + } +} + +/// The `TupleHashXOF` extendable-output functions defined in [`SP800-185`]. +/// +/// # Usage +/// +/// ```toml +/// [dependencies] +/// tiny-keccak = { version = "2.0.0", features = ["tuple_hash"] } +/// ``` +/// +/// # Example +/// +/// ``` +/// # use tiny_keccak::{TupleHash, Xof, IntoXof, Hasher}; +/// let input = b"hello world"; +/// let mut output = [0u8; 64]; +/// let mut hasher = TupleHash::v256(b""); +/// hasher.update(input); +/// let mut xof = hasher.into_xof(); +/// xof.squeeze(&mut output[..32]); +/// xof.squeeze(&mut output[32..]); +/// ``` +/// +/// --- +/// +/// [`TupleHashXof`] can be created only by using [`TupleHash::IntoXof`] interface. +/// +/// +/// [`SP800-185`]: https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-185.pdf +/// [`TupleHashXof`]: struct.TupleHashXof.html +/// [`TupleHash::IntoXof`]: struct.TupleHash.html#impl-IntoXof +#[derive(Clone)] +pub struct TupleHashXof { + state: CShake, +} + +impl IntoXof for TupleHash { + type Xof = TupleHashXof; + + fn into_xof(mut self) -> TupleHashXof { + self.state.update(right_encode(0).value()); + TupleHashXof { state: self.state } + } +} + +impl Xof for TupleHashXof { + fn squeeze(&mut self, output: &mut [u8]) { + self.state.squeeze(output) + } +} diff --git a/vendor/tiny-keccak/tests/cshake.rs b/vendor/tiny-keccak/tests/cshake.rs new file mode 100644 index 0000000..a963d55 --- /dev/null +++ b/vendor/tiny-keccak/tests/cshake.rs @@ -0,0 +1,116 @@ +use tiny_keccak::{CShake, Hasher, Xof}; + +#[test] +fn test_cshake128_one() { + let input = b"\x00\x01\x02\x03"; + let mut output = [0u8; 32]; + let name = b""; + let custom = b"Email Signature"; + let expected = b"\ + \xC1\xC3\x69\x25\xB6\x40\x9A\x04\xF1\xB5\x04\xFC\xBC\xA9\xD8\x2B\ + \x40\x17\x27\x7C\xB5\xED\x2B\x20\x65\xFC\x1D\x38\x14\xD5\xAA\xF5\ + "; + + let mut cshake = CShake::v128(name, custom); + cshake.update(input); + cshake.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_cshake128_two() { + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let mut output = [0u8; 32]; + let name = b""; + let custom = b"Email Signature"; + let 
expected = b"\
+        \xC5\x22\x1D\x50\xE4\xF8\x22\xD9\x6A\x2E\x88\x81\xA9\x61\x42\x0F\
+        \x29\x4B\x7B\x24\xFE\x3D\x20\x94\xBA\xED\x2C\x65\x24\xCC\x16\x6B\
+    ";
+
+    let mut cshake = CShake::v128(name, custom);
+    cshake.update(input);
+    cshake.finalize(&mut output);
+    assert_eq!(expected, &output);
+}
+
+#[test]
+fn test_cshake256_one() {
+    let input = b"\x00\x01\x02\x03";
+    let mut output = [0u8; 64];
+    let name = b"";
+    let custom = b"Email Signature";
+    let expected = b"\
+        \xD0\x08\x82\x8E\x2B\x80\xAC\x9D\x22\x18\xFF\xEE\x1D\x07\x0C\x48\
+        \xB8\xE4\xC8\x7B\xFF\x32\xC9\x69\x9D\x5B\x68\x96\xEE\xE0\xED\xD1\
+        \x64\x02\x0E\x2B\xE0\x56\x08\x58\xD9\xC0\x0C\x03\x7E\x34\xA9\x69\
+        \x37\xC5\x61\xA7\x4C\x41\x2B\xB4\xC7\x46\x46\x95\x27\x28\x1C\x8C\
+    ";
+
+    let mut cshake = CShake::v256(name, custom);
+    cshake.update(input);
+    cshake.finalize(&mut output);
+    assert_eq!(expected as &[u8], &output as &[u8]);
+}
+
+#[test]
+fn test_cshake256_two() {
+    let input = b"\
+        \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\
+        \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\
+        \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\
+        \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\
+        \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\
+        \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\
+        \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\
+        \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\
+        \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\
+        \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\
+        \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\
+        \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\
+        \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\
+    ";
+    let mut output = [0u8; 64];
+    let name = b"";
+    let custom = b"Email Signature";
+    let expected = b"\
+        \x07\xDC\x27\xB1\x1E\x51\xFB\xAC\x75\xBC\x7B\x3C\x1D\x98\x3E\x8B\
+        \x4B\x85\xFB\x1D\xEF\xAF\x21\x89\x12\xAC\x86\x43\x02\x73\x09\x17\
+        \x27\xF4\x2B\x17\xED\x1D\xF6\x3E\x8E\xC1\x18\xF0\x4B\x23\x63\x3C\
+        \x1D\xFB\x15\x74\xC8\xFB\x55\xCB\x45\xDA\x8E\x25\xAF\xB0\x92\xBB\
+    ";
+    let mut cshake = CShake::v256(name, custom);
+    cshake.update(input);
+    cshake.finalize(&mut output);
+    assert_eq!(expected as &[u8], &output as &[u8]);
+}
+
+#[test]
+fn test_cshake_as_shake() {
+    let mut shake = CShake::v128(&[], &[]);
+    let mut output = [0; 32];
+    let expected = b"\
+        \x43\xE4\x1B\x45\xA6\x53\xF2\xA5\xC4\x49\x2C\x1A\xDD\x54\x45\x12\
+        \xDD\xA2\x52\x98\x33\x46\x2B\x71\xA4\x1A\x45\xBE\x97\x29\x0B\x6F\
+    ";
+
+    for _ in 0..16 {
+        shake.squeeze(&mut output);
+    }
+
+    assert_eq!(expected, &output);
+}
diff --git a/vendor/tiny-keccak/tests/kangaroo.rs b/vendor/tiny-keccak/tests/kangaroo.rs
new file mode 100644
index 0000000..4492539
--- /dev/null
+++ b/vendor/tiny-keccak/tests/kangaroo.rs
@@ -0,0 +1,86 @@
+use tiny_keccak::{Hasher, KangarooTwelve};
+
+fn pattern(len: usize) -> Vec<u8> {
+    (0..len).map(|j| (j % 251) as u8).collect()
+}
+
+fn test_kangaroo_twelve<A: AsRef<[u8]>, B: AsRef<[u8]>>(
+    custom_string: A,
+    message: B,
+    output_len: usize,
+    expected: &[u8],
+) {
+    let mut kangaroo = KangarooTwelve::new(custom_string.as_ref());
+    kangaroo.update(message.as_ref());
+    let mut res = vec![0; output_len];
+    kangaroo.finalize(&mut res);
+    assert_eq!(&res[output_len - expected.len()..], expected);
+}
+
+#[test]
+fn empty_kangaroo_twelve() {
+    let expected = b"\
+        \x1a\xc2\xd4\x50\xfc\x3b\x42\x05\xd1\x9d\xa7\xbf\xca\x1b\x37\x51\
\x3c\x08\x03\x57\x7a\xc7\x16\x7f\x06\xfe\x2c\xe1\xf0\xef\x39\xe5\ + "; + test_kangaroo_twelve("", "", 32, expected); +} + +#[test] +fn kangaroo_twelve_long() { + let expected = b"\ + \xe8\xdc\x56\x36\x42\xf7\x22\x8c\x84\x68\x4c\x89\x84\x05\xd3\xa8\ + \x34\x79\x91\x58\xc0\x79\xb1\x28\x80\x27\x7a\x1d\x28\xe2\xff\x6d\ + "; + test_kangaroo_twelve("", "", 10032, expected); +} + +#[test] +fn kangaroo_twelve_with_message() { + let expected = b"\ + \x2b\xda\x92\x45\x0e\x8b\x14\x7f\x8a\x7c\xb6\x29\xe7\x84\xa0\x58\ + \xef\xca\x7c\xf7\xd8\x21\x8e\x02\xd3\x45\xdf\xaa\x65\x24\x4a\x1f\ + "; + test_kangaroo_twelve("", pattern(1), 32, expected); +} + +#[test] +fn kangaroo_twelve_with_message2() { + let expected = b"\ + \x6b\xf7\x5f\xa2\x23\x91\x98\xdb\x47\x72\xe3\x64\x78\xf8\xe1\x9b\ + \x0f\x37\x12\x05\xf6\xa9\xa9\x3a\x27\x3f\x51\xdf\x37\x12\x28\x88\ + "; + test_kangaroo_twelve("", pattern(17), 32, expected); +} + +#[test] +fn kangaroo_twelve_with_custom_string() { + let expected = b"\ + \xfa\xb6\x58\xdb\x63\xe9\x4a\x24\x61\x88\xbf\x7a\xf6\x9a\x13\x30\ + \x45\xf4\x6e\xe9\x84\xc5\x6e\x3c\x33\x28\xca\xaf\x1a\xa1\xa5\x83\ + "; + test_kangaroo_twelve(pattern(1), "", 32, expected); +} + +#[test] +fn kangaroo_twelve_with_custom_string_and_message() { + let expected = b"\ + \xd8\x48\xc5\x06\x8c\xed\x73\x6f\x44\x62\x15\x9b\x98\x67\xfd\x4c\ + \x20\xb8\x08\xac\xc3\xd5\xbc\x48\xe0\xb0\x6b\xa0\xa3\x76\x2e\xc4\ + "; + test_kangaroo_twelve(pattern(41), &[0xff], 32, expected); +} + +#[test] +fn kangaroo_twelve_with_custom_string_and_message2() { + let expected = b"\ + \x75\xd2\xf8\x6a\x2e\x64\x45\x66\x72\x6b\x4f\xbc\xfc\x56\x57\xb9\ + \xdb\xcf\x07\x0c\x7b\x0d\xca\x06\x45\x0a\xb2\x91\xd7\x44\x3b\xcf\ + "; + test_kangaroo_twelve( + pattern(68921), + &[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff], + 32, + expected, + ); +} diff --git a/vendor/tiny-keccak/tests/keccak.rs b/vendor/tiny-keccak/tests/keccak.rs new file mode 100644 index 0000000..8204fe7 --- /dev/null +++ b/vendor/tiny-keccak/tests/keccak.rs @@ -0,0 +1,30 @@ +use tiny_keccak::{Hasher, Keccak}; + +#[test] +fn empty_keccak() { + let keccak = Keccak::v256(); + let mut output = [0; 32]; + let expected = b"\ + \xc5\xd2\x46\x01\x86\xf7\x23\x3c\x92\x7e\x7d\xb2\xdc\xc7\x03\xc0\ + \xe5\x00\xb6\x53\xca\x82\x27\x3b\x7b\xfa\xd8\x04\x5d\x85\xa4\x70\ + "; + + keccak.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn string_keccak_256() { + let mut keccak = Keccak::v256(); + let mut in_and_out: [u8; 32] = [0; 32]; + for i in 1..6 { + in_and_out[i as usize - 1] = i + } + let expected = b"\ + \x7d\x87\xc5\xea\x75\xf7\x37\x8b\xb7\x01\xe4\x04\xc5\x06\x39\x16\ + \x1a\xf3\xef\xf6\x62\x93\xe9\xf3\x75\xb5\xf1\x7e\xb5\x04\x76\xf4\ + "; + keccak.update(&in_and_out[0..5]); + keccak.finalize(&mut in_and_out); + assert_eq!(expected, &in_and_out); +} diff --git a/vendor/tiny-keccak/tests/kmac.rs b/vendor/tiny-keccak/tests/kmac.rs new file mode 100644 index 0000000..1b41f9d --- /dev/null +++ b/vendor/tiny-keccak/tests/kmac.rs @@ -0,0 +1,333 @@ +use tiny_keccak::{Hasher, IntoXof, Kmac, Xof}; + +#[test] +fn test_kmac128_one() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\x00\x01\x02\x03"; + let custom = b""; + let expected = b"\ + \xE5\x78\x0B\x0D\x3E\xA6\xF7\xD3\xA4\x29\xC5\x70\x6A\xA4\x3A\x00\ + \xFA\xDB\xD7\xD4\x96\x28\x83\x9E\x31\x87\x24\x3F\x45\x6E\xE1\x4E\ + "; + let mut output = [0u8; 32]; + + let mut kmac = Kmac::v128(key, custom); + 
kmac.update(data); + kmac.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_kmac128_two() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\x00\x01\x02\x03"; + let custom = b"My Tagged Application"; + let expected = b"\ + \x3B\x1F\xBA\x96\x3C\xD8\xB0\xB5\x9E\x8C\x1A\x6D\x71\x88\x8B\x71\ + \x43\x65\x1A\xF8\xBA\x0A\x70\x70\xC0\x97\x9E\x28\x11\x32\x4A\xA5\ + "; + let mut output = [0u8; 32]; + let mut kmac = Kmac::v128(key, custom); + kmac.update(data); + kmac.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_kmac128_three() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let custom = b"My Tagged Application"; + let expected = b"\ + \x1F\x5B\x4E\x6C\xCA\x02\x20\x9E\x0D\xCB\x5C\xA6\x35\xB8\x9A\x15\ + \xE2\x71\xEC\xC7\x60\x07\x1D\xFD\x80\x5F\xAA\x38\xF9\x72\x92\x30\ + "; + let mut output = [0u8; 32]; + + let mut kmac = Kmac::v128(key, custom); + kmac.update(data); + kmac.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_kmac256_one() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\x00\x01\x02\x03"; + let custom = b"My Tagged Application"; + let expected = b"\ + \x20\xC5\x70\xC3\x13\x46\xF7\x03\xC9\xAC\x36\xC6\x1C\x03\xCB\x64\ + \xC3\x97\x0D\x0C\xFC\x78\x7E\x9B\x79\x59\x9D\x27\x3A\x68\xD2\xF7\ + \xF6\x9D\x4C\xC3\xDE\x9D\x10\x4A\x35\x16\x89\xF2\x7C\xF6\xF5\x95\ + \x1F\x01\x03\xF3\x3F\x4F\x24\x87\x10\x24\xD9\xC2\x77\x73\xA8\xDD\ + "; + let mut output = [0u8; 64]; + let mut kmac = Kmac::v256(key, custom); + kmac.update(data); + kmac.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_kmac256_two() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + 
\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let custom = b""; + let expected = b"\ + \x75\x35\x8C\xF3\x9E\x41\x49\x4E\x94\x97\x07\x92\x7C\xEE\x0A\xF2\ + \x0A\x3F\xF5\x53\x90\x4C\x86\xB0\x8F\x21\xCC\x41\x4B\xCF\xD6\x91\ + \x58\x9D\x27\xCF\x5E\x15\x36\x9C\xBB\xFF\x8B\x9A\x4C\x2E\xB1\x78\ + \x00\x85\x5D\x02\x35\xFF\x63\x5D\xA8\x25\x33\xEC\x6B\x75\x9B\x69\ + "; + let mut output = [0u8; 64]; + let mut kmac = Kmac::v256(key, custom); + kmac.update(data); + kmac.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_kmac256_three() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let custom = b"My Tagged Application"; + let expected = b"\ + \xB5\x86\x18\xF7\x1F\x92\xE1\xD5\x6C\x1B\x8C\x55\xDD\xD7\xCD\x18\ + \x8B\x97\xB4\xCA\x4D\x99\x83\x1E\xB2\x69\x9A\x83\x7D\xA2\xE4\xD9\ + \x70\xFB\xAC\xFD\xE5\x00\x33\xAE\xA5\x85\xF1\xA2\x70\x85\x10\xC3\ + \x2D\x07\x88\x08\x01\xBD\x18\x28\x98\xFE\x47\x68\x76\xFC\x89\x65\ + "; + let mut output = [0u8; 64]; + let mut kmac = Kmac::v256(key, custom); + kmac.update(data); + kmac.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_kmac128_xof_one() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\x00\x01\x02\x03"; + let custom = b""; + let expected = b"\ + \xCD\x83\x74\x0B\xBD\x92\xCC\xC8\xCF\x03\x2B\x14\x81\xA0\xF4\x46\ + \x0E\x7C\xA9\xDD\x12\xB0\x8A\x0C\x40\x31\x17\x8B\xAC\xD6\xEC\x35\ + "; + let mut output = [0u8; 32]; + let mut kmac = Kmac::v128(key, custom); + kmac.update(data); + let mut xof = kmac.into_xof(); + xof.squeeze(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_kmac128_xof_two() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\x00\x01\x02\x03"; + let custom = b"My Tagged Application"; + let expected = b"\ + \x31\xA4\x45\x27\xB4\xED\x9F\x5C\x61\x01\xD1\x1D\xE6\xD2\x6F\x06\ + \x20\xAA\x5C\x34\x1D\xEF\x41\x29\x96\x57\xFE\x9D\xF1\xA3\xB1\x6C\ + "; + let mut output = [0u8; 32]; + let mut kmac = 
Kmac::v128(key, custom); + kmac.update(data); + let mut xof = kmac.into_xof(); + xof.squeeze(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_kmac128_xof_three() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let custom = b"My Tagged Application"; + let expected = b"\ + \x47\x02\x6C\x7C\xD7\x93\x08\x4A\xA0\x28\x3C\x25\x3E\xF6\x58\x49\ + \x0C\x0D\xB6\x14\x38\xB8\x32\x6F\xE9\xBD\xDF\x28\x1B\x83\xAE\x0F\ + "; + let mut output = [0u8; 32]; + let mut kmac = Kmac::v128(key, custom); + kmac.update(data); + let mut xof = kmac.into_xof(); + xof.squeeze(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_kmac256_xof_one() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\x00\x01\x02\x03"; + let custom = b"My Tagged Application"; + let expected = b"\ + \x17\x55\x13\x3F\x15\x34\x75\x2A\xAD\x07\x48\xF2\xC7\x06\xFB\x5C\ + \x78\x45\x12\xCA\xB8\x35\xCD\x15\x67\x6B\x16\xC0\xC6\x64\x7F\xA9\ + \x6F\xAA\x7A\xF6\x34\xA0\xBF\x8F\xF6\xDF\x39\x37\x4F\xA0\x0F\xAD\ + \x9A\x39\xE3\x22\xA7\xC9\x20\x65\xA6\x4E\xB1\xFB\x08\x01\xEB\x2B\ + "; + let mut output = [0u8; 64]; + let mut kmac = Kmac::v256(key, custom); + kmac.update(data); + let mut xof = kmac.into_xof(); + xof.squeeze(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_kmac256_xof_two() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let custom = b""; + let expected = b"\ + 
\xFF\x7B\x17\x1F\x1E\x8A\x2B\x24\x68\x3E\xED\x37\x83\x0E\xE7\x97\ + \x53\x8B\xA8\xDC\x56\x3F\x6D\xA1\xE6\x67\x39\x1A\x75\xED\xC0\x2C\ + \xA6\x33\x07\x9F\x81\xCE\x12\xA2\x5F\x45\x61\x5E\xC8\x99\x72\x03\ + \x1D\x18\x33\x73\x31\xD2\x4C\xEB\x8F\x8C\xA8\xE6\xA1\x9F\xD9\x8B\ + "; + let mut output = [0u8; 64]; + let mut kmac = Kmac::v256(key, custom); + kmac.update(data); + let mut xof = kmac.into_xof(); + xof.squeeze(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_kmac256_xof_three() { + let key = b"\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + "; + let data = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\ + \x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\ + \x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F\ + \x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F\ + \x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F\ + \x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F\ + \x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\ + \x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F\ + \x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F\ + \x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F\ + \xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF\ + \xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF\ + \xC0\xC1\xC2\xC3\xC4\xC5\xC6\xC7\ + "; + let custom = b"My Tagged Application"; + let expected = b"\ + \xD5\xBE\x73\x1C\x95\x4E\xD7\x73\x28\x46\xBB\x59\xDB\xE3\xA8\xE3\ + \x0F\x83\xE7\x7A\x4B\xFF\x44\x59\xF2\xF1\xC2\xB4\xEC\xEB\xB8\xCE\ + \x67\xBA\x01\xC6\x2E\x8A\xB8\x57\x8D\x2D\x49\x9B\xD1\xBB\x27\x67\ + \x68\x78\x11\x90\x02\x0A\x30\x6A\x97\xDE\x28\x1D\xCC\x30\x30\x5D\ + "; + let mut output = [0u8; 64]; + let mut kmac = Kmac::v256(key, custom); + kmac.update(data); + let mut xof = kmac.into_xof(); + xof.squeeze(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} diff --git a/vendor/tiny-keccak/tests/parallel_hash.rs b/vendor/tiny-keccak/tests/parallel_hash.rs new file mode 100644 index 0000000..e119caa --- /dev/null +++ b/vendor/tiny-keccak/tests/parallel_hash.rs @@ -0,0 +1,123 @@ +use tiny_keccak::{Hasher, ParallelHash}; + +#[test] +fn test_parallel_hash128_one() { + let custom_string = b""; + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x10\x11\x12\x13\ + \x14\x15\x16\x17\x20\x21\x22\x23\x24\x25\x26\x27\ + "; + let block_size = 8; + let mut phash = ParallelHash::v128(custom_string, block_size); + let expected = b"\ + \xBA\x8D\xC1\xD1\xD9\x79\x33\x1D\x3F\x81\x36\x03\xC6\x7F\x72\x60\ + \x9A\xB5\xE4\x4B\x94\xA0\xB8\xF9\xAF\x46\x51\x44\x54\xA2\xB4\xF5\ + "; + let mut output = [0u8; 32]; + phash.update(input); + phash.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_parallel_hash128_two() { + let custom_string = b"Parallel Data"; + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x10\x11\x12\x13\ + \x14\x15\x16\x17\x20\x21\x22\x23\x24\x25\x26\x27\ + "; + let block_size = 8; + let mut phash = ParallelHash::v128(custom_string, block_size); + let expected = b"\ + \xFC\x48\x4D\xCB\x3F\x84\xDC\xEE\xDC\x35\x34\x38\x15\x1B\xEE\x58\ + \x15\x7D\x6E\xFE\xD0\x44\x5A\x81\xF1\x65\xE4\x95\x79\x5B\x72\x06\ + "; + let mut output = [0u8; 32]; + phash.update(input); + phash.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn 
test_parallel_hash128_three() { + let custom_string = b""; + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x10\x11\x12\x13\ + \x14\x15\x16\x17\x20\x21\x22\x23\x24\x25\x26\x27\ + "; + let block_size = 8; + let mut phash = ParallelHash::v128(custom_string, block_size); + let expected = b"\ + \xBA\x8D\xC1\xD1\xD9\x79\x33\x1D\x3F\x81\x36\x03\xC6\x7F\x72\x60\ + \x9A\xB5\xE4\x4B\x94\xA0\xB8\xF9\xAF\x46\x51\x44\x54\xA2\xB4\xF5\ + "; + let mut output = [0u8; 32]; + phash.update(&input[..13]); + phash.update(&input[13..]); + phash.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_parallel_hash256_one() { + let custom_string = b""; + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x10\x11\x12\x13\ + \x14\x15\x16\x17\x20\x21\x22\x23\x24\x25\x26\x27\ + "; + let block_size = 8; + let mut phash = ParallelHash::v256(custom_string, block_size); + let expected = b"\ + \xBC\x1E\xF1\x24\xDA\x34\x49\x5E\x94\x8E\xAD\x20\x7D\xD9\x84\x22\ + \x35\xDA\x43\x2D\x2B\xBC\x54\xB4\xC1\x10\xE6\x4C\x45\x11\x05\x53\ + \x1B\x7F\x2A\x3E\x0C\xE0\x55\xC0\x28\x05\xE7\xC2\xDE\x1F\xB7\x46\ + \xAF\x97\xA1\xDD\x01\xF4\x3B\x82\x4E\x31\xB8\x76\x12\x41\x04\x29\ + "; + let mut output = [0u8; 64]; + phash.update(input); + phash.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_parallel_hash256_two() { + let custom_string = b"Parallel Data"; + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x10\x11\x12\x13\ + \x14\x15\x16\x17\x20\x21\x22\x23\x24\x25\x26\x27\ + "; + let block_size = 8; + let mut phash = ParallelHash::v256(custom_string, block_size); + let expected = b"\ + \xCD\xF1\x52\x89\xB5\x4F\x62\x12\xB4\xBC\x27\x05\x28\xB4\x95\x26\ + \x00\x6D\xD9\xB5\x4E\x2B\x6A\xDD\x1E\xF6\x90\x0D\xDA\x39\x63\xBB\ + \x33\xA7\x24\x91\xF2\x36\x96\x9C\xA8\xAF\xAE\xA2\x9C\x68\x2D\x47\ + \xA3\x93\xC0\x65\xB3\x8E\x29\xFA\xE6\x51\xA2\x09\x1C\x83\x31\x10\ + "; + let mut output = [0u8; 64]; + phash.update(input); + phash.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_parallel_hash256_three() { + let custom_string = b""; + let input = b"\ + \x00\x01\x02\x03\x04\x05\x06\x07\x10\x11\x12\x13\ + \x14\x15\x16\x17\x20\x21\x22\x23\x24\x25\x26\x27\ + "; + let block_size = 8; + let mut phash = ParallelHash::v256(custom_string, block_size); + let expected = b"\ + \xBC\x1E\xF1\x24\xDA\x34\x49\x5E\x94\x8E\xAD\x20\x7D\xD9\x84\x22\ + \x35\xDA\x43\x2D\x2B\xBC\x54\xB4\xC1\x10\xE6\x4C\x45\x11\x05\x53\ + \x1B\x7F\x2A\x3E\x0C\xE0\x55\xC0\x28\x05\xE7\xC2\xDE\x1F\xB7\x46\ + \xAF\x97\xA1\xDD\x01\xF4\x3B\x82\x4E\x31\xB8\x76\x12\x41\x04\x29\ + "; + let mut output = [0u8; 64]; + phash.update(&input[..13]); + phash.update(&input[13..]); + phash.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} diff --git a/vendor/tiny-keccak/tests/sha3.rs b/vendor/tiny-keccak/tests/sha3.rs new file mode 100644 index 0000000..0a6518e --- /dev/null +++ b/vendor/tiny-keccak/tests/sha3.rs @@ -0,0 +1,94 @@ +use tiny_keccak::{Hasher, Sha3}; + +#[test] +fn empty_sha3_256() { + let sha3 = Sha3::v256(); + let mut output = [0; 32]; + let expected = b"\ + \xa7\xff\xc6\xf8\xbf\x1e\xd7\x66\x51\xc1\x47\x56\xa0\x61\xd6\x62\ + \xf5\x80\xff\x4d\xe4\x3b\x49\xfa\x82\xd8\x0a\x4b\x80\xf8\x43\x4a\ + "; + sha3.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn hello_sha3_256() { + let mut sha3 = Sha3::v256(); + let input = b"hello"; + let mut output = [0u8; 32]; + let expected = b"\ + 
\x33\x38\xbe\x69\x4f\x50\xc5\xf3\x38\x81\x49\x86\xcd\xf0\x68\x64\ + \x53\xa8\x88\xb8\x4f\x42\x4d\x79\x2a\xf4\xb9\x20\x23\x98\xf3\x92\ + "; + sha3.update(input); + sha3.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn hello_sha3_256_parts() { + let mut sha3 = Sha3::v256(); + let input_a = b"hell"; + let input_b = b"o"; + let mut output = [0u8; 32]; + let expected = b"\ + \x33\x38\xbe\x69\x4f\x50\xc5\xf3\x38\x81\x49\x86\xcd\xf0\x68\x64\ + \x53\xa8\x88\xb8\x4f\x42\x4d\x79\x2a\xf4\xb9\x20\x23\x98\xf3\x92\ + "; + sha3.update(input_a); + sha3.update(input_b); + sha3.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn hello_sha3_256_parts5() { + let mut sha3 = Sha3::v256(); + let mut output = [0u8; 32]; + let expected = b"\ + \x33\x38\xbe\x69\x4f\x50\xc5\xf3\x38\x81\x49\x86\xcd\xf0\x68\x64\ + \x53\xa8\x88\xb8\x4f\x42\x4d\x79\x2a\xf4\xb9\x20\x23\x98\xf3\x92\ + "; + sha3.update(b"h"); + sha3.update(b"e"); + sha3.update(b"l"); + sha3.update(b"l"); + sha3.update(b"o"); + sha3.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn long_string_sha3_512() { + let mut sha3 = Sha3::v512(); + let input = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; + let mut output = [0u8; 64]; + let expected = b"\ + \xf3\x2a\x94\x23\x55\x13\x51\xdf\x0a\x07\xc0\xb8\xc2\x0e\xb9\x72\ + \x36\x7c\x39\x8d\x61\x06\x60\x38\xe1\x69\x86\x44\x8e\xbf\xbc\x3d\ + \x15\xed\xe0\xed\x36\x93\xe3\x90\x5e\x9a\x8c\x60\x1d\x9d\x00\x2a\ + \x06\x85\x3b\x97\x97\xef\x9a\xb1\x0c\xbd\xe1\x00\x9c\x7d\x0f\x09\ + "; + sha3.update(input); + sha3.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn long_string_sha3_512_parts() { + let mut sha3 = Sha3::v512(); + let input_a = b"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip "; + let input_b = b"ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; + let mut output = [0u8; 64]; + let expected = b"\ + \xf3\x2a\x94\x23\x55\x13\x51\xdf\x0a\x07\xc0\xb8\xc2\x0e\xb9\x72\ + \x36\x7c\x39\x8d\x61\x06\x60\x38\xe1\x69\x86\x44\x8e\xbf\xbc\x3d\ + \x15\xed\xe0\xed\x36\x93\xe3\x90\x5e\x9a\x8c\x60\x1d\x9d\x00\x2a\ + \x06\x85\x3b\x97\x97\xef\x9a\xb1\x0c\xbd\xe1\x00\x9c\x7d\x0f\x09\ + "; + sha3.update(input_a); + sha3.update(input_b); + sha3.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} diff --git a/vendor/tiny-keccak/tests/shake.rs b/vendor/tiny-keccak/tests/shake.rs new file mode 100644 index 0000000..bc411ce --- /dev/null +++ b/vendor/tiny-keccak/tests/shake.rs @@ -0,0 +1,37 @@ +use tiny_keccak::{Hasher, Shake, Xof}; + +#[test] +fn shake_xof_one() { + let mut shake = Shake::v128(); + let mut output = [0; 32]; + let expected = b"\ + \x43\xE4\x1B\x45\xA6\x53\xF2\xA5\xC4\x49\x2C\x1A\xDD\x54\x45\x12\ + \xDD\xA2\x52\x98\x33\x46\x2B\x71\xA4\x1A\x45\xBE\x97\x29\x0B\x6F\ + "; + + for _ in 0..16 { + shake.squeeze(&mut output); + } + + assert_eq!(expected, &output); +} + +#[test] +fn shake_xof_two() { + let mut shake = Shake::v128(); + let mut output = [0; 32]; + let expected = b"\ + \x44\xC9\xFB\x35\x9F\xD5\x6A\xC0\xA9\xA7\x5A\x74\x3C\xFF\x68\x62\ + \xF1\x7D\x72\x59\xAB\x07\x52\x16\xC0\x69\x95\x11\x64\x3B\x64\x39\ + "; + + for _ in 0..10 { + shake.update(&[0xa3; 20]); + } + + for _ in 0..16 { + shake.squeeze(&mut output); + } + + assert_eq!(expected, &output); +} diff --git a/vendor/tiny-keccak/tests/tuple_hash.rs b/vendor/tiny-keccak/tests/tuple_hash.rs new file mode 100644 index 0000000..7665b14 --- /dev/null +++ b/vendor/tiny-keccak/tests/tuple_hash.rs @@ -0,0 +1,113 @@ +use tiny_keccak::{Hasher, TupleHash}; + +#[test] +fn test_tuple_hash128_one() { + let te3 = b"\x00\x01\x02"; + let te6 = b"\x10\x11\x12\x13\x14\x15"; + let s0 = b""; + let expected = b"\ + \xC5\xD8\x78\x6C\x1A\xFB\x9B\x82\x11\x1A\xB3\x4B\x65\xB2\xC0\x04\ + \x8F\xA6\x4E\x6D\x48\xE2\x63\x26\x4C\xE1\x70\x7D\x3F\xFC\x8E\xD1\ + "; + let mut output = [0u8; 32]; + let mut hasher = TupleHash::v128(s0); + hasher.update(te3); + hasher.update(te6); + hasher.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_tuple_hash128_two() { + let te3 = b"\x00\x01\x02"; + let te6 = b"\x10\x11\x12\x13\x14\x15"; + let s1 = b"My Tuple App"; + let expected = b"\ + \x75\xCD\xB2\x0F\xF4\xDB\x11\x54\xE8\x41\xD7\x58\xE2\x41\x60\xC5\ + \x4B\xAE\x86\xEB\x8C\x13\xE7\xF5\xF4\x0E\xB3\x55\x88\xE9\x6D\xFB\ + "; + let mut output = [0u8; 32]; + let mut hasher = TupleHash::v128(s1); + hasher.update(te3); + hasher.update(te6); + hasher.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_tuple_hash128_three() { + let te3 = b"\x00\x01\x02"; + let te6 = b"\x10\x11\x12\x13\x14\x15"; + let te9 = b"\x20\x21\x22\x23\x24\x25\x26\x27\x28"; + let s1 = b"My Tuple App"; + let expected = b"\ + \xE6\x0F\x20\x2C\x89\xA2\x63\x1E\xDA\x8D\x4C\x58\x8C\xA5\xFD\x07\ + \xF3\x9E\x51\x51\x99\x8D\xEC\xCF\x97\x3A\xDB\x38\x04\xBB\x6E\x84\ + "; + let mut output = [0u8; 32]; + let mut hasher = TupleHash::v128(s1); + hasher.update(te3); + hasher.update(te6); + hasher.update(te9); + hasher.finalize(&mut output); + assert_eq!(expected, &output); +} + +#[test] +fn test_tuple_hash256() { + let te3 = b"\x00\x01\x02"; + let te6 = b"\x10\x11\x12\x13\x14\x15"; + let s0 = b""; + let expected = b"\ + \xCF\xB7\x05\x8C\xAC\xA5\xE6\x68\xF8\x1A\x12\xA2\x0A\x21\x95\xCE\ + 
\x97\xA9\x25\xF1\xDB\xA3\xE7\x44\x9A\x56\xF8\x22\x01\xEC\x60\x73\ + \x11\xAC\x26\x96\xB1\xAB\x5E\xA2\x35\x2D\xF1\x42\x3B\xDE\x7B\xD4\ + \xBB\x78\xC9\xAE\xD1\xA8\x53\xC7\x86\x72\xF9\xEB\x23\xBB\xE1\x94\ + "; + let mut output = [0u8; 64]; + let mut hasher = TupleHash::v256(s0); + hasher.update(te3); + hasher.update(te6); + hasher.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_tuple_hash256_two() { + let te3 = b"\x00\x01\x02"; + let te6 = b"\x10\x11\x12\x13\x14\x15"; + let s1 = b"My Tuple App"; + let expected = b"\ + \x14\x7C\x21\x91\xD5\xED\x7E\xFD\x98\xDB\xD9\x6D\x7A\xB5\xA1\x16\ + \x92\x57\x6F\x5F\xE2\xA5\x06\x5F\x3E\x33\xDE\x6B\xBA\x9F\x3A\xA1\ + \xC4\xE9\xA0\x68\xA2\x89\xC6\x1C\x95\xAA\xB3\x0A\xEE\x1E\x41\x0B\ + \x0B\x60\x7D\xE3\x62\x0E\x24\xA4\xE3\xBF\x98\x52\xA1\xD4\x36\x7E\ + "; + let mut output = [0u8; 64]; + let mut hasher = TupleHash::v256(s1); + hasher.update(te3); + hasher.update(te6); + hasher.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} + +#[test] +fn test_tuple_hash256_three() { + let te3 = b"\x00\x01\x02"; + let te6 = b"\x10\x11\x12\x13\x14\x15"; + let te9 = b"\x20\x21\x22\x23\x24\x25\x26\x27\x28"; + let s1 = b"My Tuple App"; + let expected = b"\ + \x45\x00\x0B\xE6\x3F\x9B\x6B\xFD\x89\xF5\x47\x17\x67\x0F\x69\xA9\ + \xBC\x76\x35\x91\xA4\xF0\x5C\x50\xD6\x88\x91\xA7\x44\xBC\xC6\xE7\ + \xD6\xD5\xB5\xE8\x2C\x01\x8D\xA9\x99\xED\x35\xB0\xBB\x49\xC9\x67\ + \x8E\x52\x6A\xBD\x8E\x85\xC1\x3E\xD2\x54\x02\x1D\xB9\xE7\x90\xCE\ + "; + let mut output = [0u8; 64]; + let mut hasher = TupleHash::v256(s1); + hasher.update(te3); + hasher.update(te6); + hasher.update(te9); + hasher.finalize(&mut output); + assert_eq!(expected as &[u8], &output as &[u8]); +} -- Gitee From 7360f458e932245e4930aea53dbbead85321bb8a Mon Sep 17 00:00:00 2001 From: yangpan Date: Fri, 5 Jan 2024 10:22:02 +0800 Subject: [PATCH 2/6] =?UTF-8?q?=E4=BF=AE=E6=94=B9vendor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- vendor/hashbrown/.cargo-checksum.json | 2 +- vendor/hashbrown/CHANGELOG.md | 139 +- vendor/hashbrown/Cargo.toml | 40 +- vendor/hashbrown/README.md | 77 +- vendor/hashbrown/benches/bench.rs | 2 +- .../hashbrown/src/external_trait_impls/mod.rs | 2 + .../src/external_trait_impls/rayon/map.rs | 47 +- .../src/external_trait_impls/rayon/mod.rs | 1 + .../src/external_trait_impls/rayon/raw.rs | 23 +- .../src/external_trait_impls/rayon/set.rs | 34 +- .../src/external_trait_impls/rayon/table.rs | 252 ++ .../src/external_trait_impls/rkyv/hash_map.rs | 125 + .../src/external_trait_impls/rkyv/hash_set.rs | 123 + .../src/external_trait_impls/rkyv/mod.rs | 2 + .../src/external_trait_impls/serde.rs | 63 +- vendor/hashbrown/src/lib.rs | 77 +- vendor/hashbrown/src/macros.rs | 2 +- vendor/hashbrown/src/map.rs | 1240 +++++-- vendor/hashbrown/src/raw/alloc.rs | 57 +- vendor/hashbrown/src/raw/bitmask.rs | 99 +- vendor/hashbrown/src/raw/generic.rs | 59 +- vendor/hashbrown/src/raw/mod.rs | 3236 ++++++++++++++--- vendor/hashbrown/src/raw/neon.rs | 124 + vendor/hashbrown/src/raw/sse2.rs | 31 +- vendor/hashbrown/src/rustc_entry.rs | 32 +- vendor/hashbrown/src/scopeguard.rs | 14 +- vendor/hashbrown/src/set.rs | 407 ++- vendor/hashbrown/src/table.rs | 2030 +++++++++++ vendor/hashbrown/tests/equivalent_trait.rs | 53 + vendor/hashbrown/tests/raw.rs | 11 + vendor/hashbrown/tests/rayon.rs | 4 +- vendor/hashbrown/tests/set.rs | 2 +- 32 files changed, 7096 insertions(+), 1314 deletions(-) create mode 100644 
vendor/hashbrown/src/external_trait_impls/rayon/table.rs create mode 100644 vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs create mode 100644 vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs create mode 100644 vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs create mode 100644 vendor/hashbrown/src/raw/neon.rs create mode 100644 vendor/hashbrown/src/table.rs create mode 100644 vendor/hashbrown/tests/equivalent_trait.rs create mode 100644 vendor/hashbrown/tests/raw.rs diff --git a/vendor/hashbrown/.cargo-checksum.json b/vendor/hashbrown/.cargo-checksum.json index 5561cde..6bb5ac5 100644 --- a/vendor/hashbrown/.cargo-checksum.json +++ b/vendor/hashbrown/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"ade49a29d368e16ce508aee91b477ecbad7e2e52eb6fee7b4c1fc86199963f0e","Cargo.toml":"421b3a71d97faf0a7e52c3b2bfbe0f1c036b9dbf6232b4e5b41221bb54358f5a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"a536b3bb3f3521e59836080f05a4783150fa8484f759a31468ce3b6dba1f33eb","benches/bench.rs":"aadc39d815eadf094ed9357d946319df2d93194203bbccb7c33cea6951d654df","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"d69528827794524cfd9acbeacc1ac4f6131e3c7574311e6d919f818f65fbff07","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"2809e2a0071db8101c38789deb955f3830c5c3455eb1794ff64a0cf2ceb53fc7","src/external_trait_impls/rayon/mod.rs":"156de9c1ad0123334ea3b7e5a17444faf1b8bf971aa88a1f23e2f2d1c3021141","src/external_trait_impls/rayon/raw.rs":"e62c5f3ca5fffea47357e64b6f8c34cec94af62d9bd28a2b87934da46c22b66e","src/external_trait_impls/rayon/set.rs":"c4c44d44e56c2f59e9e1355662e29d8744ac96645ca4414127a359fb46cb0fbf","src/external_trait_impls/serde.rs":"0bc1a1f218d1ae7a5262557a5e3737b9334caf7d50c136dbdc75ff75680c223b","src/lib.rs":"c82fbee9684bfff40ef55d5f0c9f855c11f71f9fd1720fb084ef8331bdbc41d8","src/macros.rs":"36fe532656879c80f7753d13354b889f5b45caef451a1bb3a27dbc32d74c9878","src/map.rs":"df39edae67c569378dea9a4d928685cb4d06569712c6ac36a54df76fb5d87fe3","src/raw/alloc.rs":"184a0345bc2c7544b65c28724063be26b1f2b28dbaaa028a0b01192ccac25557","src/raw/bitmask.rs":"820d90b19b7e3433a1048ace008c9526331cd53a576cb0cfc1ff9960b6fe52f8","src/raw/generic.rs":"f5013a50d6d82d5cc8bad8b8c26c24d00fa810197f9f123256c58ac92e0d98f9","src/raw/mod.rs":"fa38247c6b3bd70636be50400debb9966a3446d49ee13e4f4e2dfe4ceed1b201","src/raw/sse2.rs":"838cfdb1daa1e70951ed25f985283b8b7ab4b46fa130f92eda152047ce6086f6","src/rustc_entry.rs":"cdd70972cba5b79ca1cad79869cb5e184d6dc4798ef90822e966ef89679ba011","src/scopeguard.rs":"d13de1b12897add7fe1c3eba6f906c9cc09d86509b6cfe06b95d63803fe9265c","src/set.rs":"6877d4a42eeadd681e3b8881528e4b20f14cfedbc11e9318bfcf425ef96d1546","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/rayon.rs":"83d5289771542203f539a41cccb889fbe7ce70f5adf5b903ac9f051e3ba13cfa","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"01cf39efb04646ef4c63a809ebb96dfa63cfec472bf8bdb6c121f6526d40c40e"},"package":"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"} \ No newline at end of file 
+{"files":{"CHANGELOG.md":"9cff035ecd949ca041cae2ab20be5c642360b369a499286ea830d4a48bf3b284","Cargo.toml":"a23bc72f1aed8ac540796975437fb8e158e7b4a186c1d646711717f57c4473ce","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"84c222ce49510535419d338b7532a72a2bf22b7466e44de78d92d25b6c7d636b","benches/bench.rs":"ef7bc025922f077d307c565640c005d056e3d6c1713448a95aae92d3c22c1005","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"0625e6a5e3b8ecc8901a12aeeea54393fd84617fb3a14d98a34d2d2bddb8d257","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"96fdf39b3f601f77152d7ce84541b8f51f32b9274b7da9c294862892e721a5d8","src/external_trait_impls/rayon/mod.rs":"126edc882501dddd25e442d9236508b5b386eb8c0a9f5d654f2dd081086c1616","src/external_trait_impls/rayon/raw.rs":"04012fb2e99648819b4bc0044107ed3cb94013e242b7865075c5bd9ebf1b6865","src/external_trait_impls/rayon/set.rs":"7539348ff7bc6e3cce6b3c019d62dc401eea0138c578fef729c2593e8ead1cfa","src/external_trait_impls/rayon/table.rs":"8778d29509c68b5b7cb66859db025d3939ce22e7cf370b20ff3dea4fe4b29fd0","src/external_trait_impls/rkyv/hash_map.rs":"7abe24318143b776016052b05840656afc858b1ba5252f3d418d61972477f53d","src/external_trait_impls/rkyv/hash_set.rs":"38d969125d17d606492ec4ec9fc06b7e7118eb903240dacf40de21b9b06fa5c8","src/external_trait_impls/rkyv/mod.rs":"54399ce5574fd1d84b7b0cb4238fa3e898575e89a6724299be009d2172bda02e","src/external_trait_impls/serde.rs":"6dbe104dee16b453b6b048b541c6e02c6d067d970dfafd243fc4360288b0168c","src/lib.rs":"fbc05970d6458046590e9c4a33fc9a6fdc94ef725b9b00354fa609e207e6ae50","src/macros.rs":"98a26b908fc0fbe6a58d008a317e550013d615eb3cc17a5054a573c62c1d74cb","src/map.rs":"688f2ccecd38f32c66c7fc905703f363dd88511fc29c99bc260bb6973db66430","src/raw/alloc.rs":"902f8588d0fdee3e5c3dc02410f41d4b38ac88843727387f929f3186b3a2d322","src/raw/bitmask.rs":"3b3dce8d6a48856ada19085abf43908f124ab3419fcf434b9ca64d7bff243f67","src/raw/generic.rs":"efc5e603be3e9a17935aef1836a38ce01c78a0093b2af0671548eb5459b37921","src/raw/mod.rs":"73038e430bd54d56c484b6798e67dece4d67b3cf86031639a819629e8376d673","src/raw/neon.rs":"9907d8ebc36fc3df562dde478ea9b72213fda65288a304718d8647f0029dc9ad","src/raw/sse2.rs":"39038e3344e49f4638e211bcdbf56565ac53e90dce56172cc3b526fea911c2af","src/rustc_entry.rs":"8142ed89b50155602ef8c1628382bd62d3ee903920fe49d403d4100a278c6ba4","src/scopeguard.rs":"1a246e08a63c06cd8ad934bd7da229421bf804f991ae93cd7e242da27ca6c601","src/set.rs":"4069da81fc978f6d3b9605d8cf349c2b1b8c7766ab6bf3fec83b6442718fdce7","src/table.rs":"b64e4c4910b911175ae0eb72e744986ce695d3ecc0b52b70d916e3adefdd1908","tests/equivalent_trait.rs":"84faa3fe9d67c375d03fec81f0f1412c47862477d42e84e7d235258236338d5b","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/raw.rs":"43ed2f98877533a0905611d9a30f26b183dd3e103e3856eeab80e7b8ac7894d3","tests/rayon.rs":"39cb24ab45fce8087bb54948715c8b6973ebfba1a325292b5b3cd9aab50b5fd2","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"9f8011c29d1059aadb54b6dd4623521d5178b4278b4a56021ef2cee4bbb19fd9"},"package":"f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156"} 
\ No newline at end of file
diff --git a/vendor/hashbrown/CHANGELOG.md b/vendor/hashbrown/CHANGELOG.md
index 3354b54..0e13b23 100644
--- a/vendor/hashbrown/CHANGELOG.md
+++ b/vendor/hashbrown/CHANGELOG.md
@@ -7,35 +7,133 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
 
 ## [Unreleased]
 
+## [v0.14.2] - 2023-10-19
+
+### Added
+
+- `HashTable` type which provides a low-level but safe API with explicit hashing. (#466)
+
+### Fixed
+
+- Disabled the use of NEON instructions on big-endian ARM. (#475)
+- Disabled the use of NEON instructions on Miri. (#476)
+
+## [v0.14.1] - 2023-09-28
+
+### Added
+
+- Allow serializing `HashMap`s that use a custom allocator. (#449)
+
+### Changed
+
+- Use the `Equivalent` trait from the `equivalent` crate. (#442)
+- Slightly improved performance of table resizing. (#451)
+- Relaxed MSRV to 1.63.0. (#457)
+- Removed `Clone` requirement from custom allocators. (#468)
+
+### Fixed
+
+- Fixed custom allocators being leaked in some situations. (#439, #465)
+
+## [v0.14.0] - 2023-06-01
+
+### Added
+
+- Support for `allocator-api2` crate
+  for interfacing with custom allocators on stable. (#417)
+- Optimized implementation for ARM using NEON instructions. (#430)
+- Support for rkyv serialization. (#432)
+- `Equivalent` trait to look up values without `Borrow`. (#345)
+- `Hash{Map,Set}::raw_table_mut` is added which returns a mutable reference. (#404)
+- Fast path for `clear` on empty tables. (#428)
+
+### Changed
+
+- Optimized insertion to only perform a single lookup. (#277)
+- `DrainFilter` (`drain_filter`) has been renamed to `ExtractIf` and no longer drops remaining
+  elements when the iterator is dropped. (#374)
+- Bumped MSRV to 1.64.0. (#431)
+- `{Map,Set}::raw_table` now returns an immutable reference. (#404)
+- `VacantEntry` and `OccupiedEntry` now use the default hasher if none is
+  specified in generics. (#389)
+- `RawTable::data_start` now returns a `NonNull<T>` to match `RawTable::data_end`. (#387)
+- `RawIter::{reflect_insert, reflect_remove}` are now unsafe. (#429)
+- `RawTable::find_potential` is renamed to `find_or_find_insert_slot` and returns an `InsertSlot`. (#429)
+- `RawTable::remove` now also returns an `InsertSlot`. (#429)
+- `InsertSlot` can be used to insert an element with `RawTable::insert_in_slot`. (#429)
+- `RawIterHash` no longer has a lifetime tied to that of the `RawTable`. (#427)
+- The trait bounds of `HashSet::raw_table` have been relaxed to not require `Eq + Hash`. (#423)
+- `EntryRef::and_replace_entry_with` and `OccupiedEntryRef::replace_entry_with`
+  were changed to give a `&K` instead of a `&Q` to the closure.
+
+### Removed
+
+- Support for `bumpalo` as an allocator with custom wrapper.
+  Use `allocator-api2` feature in `bumpalo` to use it as an allocator
+  for `hashbrown` collections. (#417)
+
+## [v0.13.2] - 2023-01-12
+
+### Fixed
+
+- Added `#[inline(always)]` to `find_inner`. (#375)
+- Fixed `RawTable::allocation_info` for empty tables. (#376)
+
+## [v0.13.1] - 2022-11-10
+
+### Added
+
+- Added `Equivalent` trait to customize key lookups. (#350)
+- Added support for 16-bit targets. (#368)
+- Added `RawTable::allocation_info` which provides information about the memory
+  usage of a table. (#371)
+
+### Changed
+
+- Bumped MSRV to 1.61.0.
+- Upgraded to `ahash` 0.8. (#357)
+- Make `with_hasher_in` const. (#355)
+- The following methods have been removed from the `RawTable` API in favor of
+  safer alternatives:
+  - `RawTable::erase_no_drop` => Use `RawTable::erase` or `RawTable::remove` instead.
+  - `Bucket::read` => Use `RawTable::remove` instead.
+  - `Bucket::drop` => Use `RawTable::erase` instead.
+  - `Bucket::write` => Use `Bucket::as_mut` instead.
+
+### Fixed
+
+- Ensure that `HashMap` allocations don't exceed `isize::MAX`. (#362)
+- Fixed issue with field retagging in scopeguard. (#359)
+
 ## [v0.12.3] - 2022-07-17
 
-## Fixed
+### Fixed
 
 - Fixed double-drop in `RawTable::clone_from`. (#348)
 
 ## [v0.12.2] - 2022-07-09
 
-## Added
+### Added
 
 - Added `Entry` API for `HashSet`. (#342)
 - Added `Extend<&'a (K, V)> for HashMap`. (#340)
 - Added length-based short-circuiting for hash table iteration. (#338)
 - Added a function to access the `RawTable` of a `HashMap`. (#335)
 
-## Changed
+### Changed
 
 - Edited `do_alloc` to reduce LLVM IR generated. (#341)
 
 ## [v0.12.1] - 2022-05-02
 
-## Fixed
+### Fixed
 
 - Fixed underflow in `RawIterRange::size_hint`. (#325)
 - Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325)
 
 ## [v0.12.0] - 2022-01-17
 
-## Added
+### Added
 
 - Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297)
 - Added an `allocator()` getter to HashMap and HashSet. (#257)
@@ -44,7 +142,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
 - Implement `From<array>` on `HashSet` and `HashMap`. (#298)
 - Added `entry_ref` API to `HashMap`. (#201)
 
-## Changed
+### Changed
 
 - Bumped minimum Rust version to 1.56.1 and edition to 2021.
 - Use u64 for the GroupWord on WebAssembly. (#271)
@@ -56,7 +154,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
 - Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291)
 - Don't hash the key when searching in an empty table. (#305)
 
-## Fixed
+### Fixed
 
 - Guard against allocations exceeding isize::MAX. (#268)
 - Made `RawTable::insert_no_grow` unsafe. (#254)
@@ -65,19 +163,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
 
 ## [v0.11.2] - 2021-03-25
 
-## Fixed
+### Fixed
 
 - Added missing allocator type parameter to `HashMap`'s and `HashSet`'s `Clone` impls. (#252)
 
 ## [v0.11.1] - 2021-03-20
 
-## Fixed
+### Fixed
 
 - Added missing `pub` modifier to `BumpWrapper`. (#251)
 
 ## [v0.11.0] - 2021-03-14
 
-## Added
+### Added
 - Added safe `try_insert_no_grow` method to `RawTable`. (#229)
 - Added support for `bumpalo` as an allocator without the `nightly` feature. (#231)
 - Implemented `Default` for `RawTable`. (#237)
@@ -86,22 +184,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
 - Added `From<HashMap<T, (), A>>` for `HashSet<T, A>`. (#235)
 - Added `try_insert` method to `HashMap`. (#247)
 
-## Changed
+### Changed
 - The minimum Rust version has been bumped to 1.49.0. (#230)
 - Significantly improved compilation times by reducing the amount of generated IR. (#205)
 
-## Removed
+### Removed
 - We no longer re-export the unstable allocator items from the standard library, nor the stable shims approximating the same. (#227)
 - Removed hasher specialization support from `aHash`, which was resulting in inconsistent hashes being generated for a key. (#248)
 
-## Fixed
+### Fixed
 - Fixed union length comparison. (#228)
 
 ## ~~[v0.10.0] - 2021-01-16~~
 
 This release was _yanked_ due to inconsistent hashes being generated with the `nightly` feature. (#248)
 
-## Changed
+### Changed
 - Parametrized `RawTable`, `HashSet` and `HashMap` over an allocator. (#133)
 - Improved branch prediction hints on stable. (#209)
 - Optimized hashing of primitive types with AHash using specialization. 
(#207) @@ -109,7 +207,7 @@ This release was _yanked_ due to inconsistent hashes being generated with the `n ## [v0.9.1] - 2020-09-28 -## Added +### Added - Added safe methods to `RawTable` (#202): - `get`: `find` and `as_ref` - `get_mut`: `find` and `as_mut` @@ -117,7 +215,7 @@ This release was _yanked_ due to inconsistent hashes being generated with the `n - `remove_entry`: `find` and `remove` - `erase_entry`: `find` and `erase` -## Changed +### Changed - Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200) - Made `RawTable::drain` safe. (#201) @@ -215,7 +313,7 @@ This release was _yanked_ due to inconsistent hashes being generated with the `n ## [v0.6.2] - 2019-10-23 ### Added -- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between +- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between runtime performance and compilation time. (#119) ## [v0.6.1] - 2019-10-04 @@ -363,7 +461,12 @@ This release was _yanked_ due to a breaking change for users of `no-default-feat - Initial release -[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...HEAD +[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.14.2...HEAD +[v0.14.2]: https://github.com/rust-lang/hashbrown/compare/v0.14.1...v0.14.2 +[v0.14.1]: https://github.com/rust-lang/hashbrown/compare/v0.14.0...v0.14.1 +[v0.14.0]: https://github.com/rust-lang/hashbrown/compare/v0.13.2...v0.14.0 +[v0.13.2]: https://github.com/rust-lang/hashbrown/compare/v0.13.1...v0.13.2 +[v0.13.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...v0.13.1 [v0.12.3]: https://github.com/rust-lang/hashbrown/compare/v0.12.2...v0.12.3 [v0.12.2]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...v0.12.2 [v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1 diff --git a/vendor/hashbrown/Cargo.toml b/vendor/hashbrown/Cargo.toml index fb130d2..2f374f4 100644 --- a/vendor/hashbrown/Cargo.toml +++ b/vendor/hashbrown/Cargo.toml @@ -11,9 +11,9 @@ [package] edition = "2021" -rust-version = "1.56.0" +rust-version = "1.63.0" name = "hashbrown" -version = "0.12.3" +version = "0.14.2" authors = ["Amanieu d'Antras "] exclude = [ ".github", @@ -33,7 +33,6 @@ categories = [ ] license = "MIT OR Apache-2.0" repository = "https://github.com/rust-lang/hashbrown" -resolver = "2" [package.metadata.docs.rs] features = [ @@ -42,9 +41,10 @@ features = [ "serde", "raw", ] +rustdoc-args = ["--generate-link-to-definition"] [dependencies.ahash] -version = "0.7.0" +version = "0.8.0" optional = true default-features = false @@ -53,9 +53,11 @@ version = "1.0.0" optional = true package = "rustc-std-workspace-alloc" -[dependencies.bumpalo] -version = "3.5.0" +[dependencies.allocator-api2] +version = "0.2.9" +features = ["alloc"] optional = true +default-features = false [dependencies.compiler_builtins] version = "0.1.2" @@ -66,15 +68,30 @@ version = "1.0.0" optional = true package = "rustc-std-workspace-core" +[dependencies.equivalent] +version = "1.0" +optional = true +default-features = false + [dependencies.rayon] version = "1.0" optional = true +[dependencies.rkyv] +version = "0.7.42" +features = ["alloc"] +optional = true +default-features = false + [dependencies.serde] version = "1.0.25" optional = true default-features = false +[dev-dependencies.bumpalo] +version = "3.13.0" +features = ["allocator-api2"] + [dev-dependencies.doc-comment] version = "0.3.1" @@ -91,17 +108,24 @@ features = ["small_rng"] [dev-dependencies.rayon] version = "1.0" 
+[dev-dependencies.rkyv] +version = "0.7.42" +features = ["validation"] + [dev-dependencies.serde_test] version = "1.0" [features] -ahash-compile-time-rng = ["ahash/compile-time-rng"] default = [ "ahash", "inline-more", + "allocator-api2", ] inline-more = [] -nightly = [] +nightly = [ + "allocator-api2?/nightly", + "bumpalo/allocator_api", +] raw = [] rustc-dep-of-std = [ "nightly", diff --git a/vendor/hashbrown/README.md b/vendor/hashbrown/README.md index 2eddcf3..5eaef8b 100644 --- a/vendor/hashbrown/README.md +++ b/vendor/hashbrown/README.md @@ -4,7 +4,7 @@ hashbrown [![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions) [![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) [![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) -[![Rust](https://img.shields.io/badge/rust-1.56.1%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) +[![Rust](https://img.shields.io/badge/rust-1.63.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) This crate is a Rust port of Google's high-performance [SwissTable] hash map, adapted to make it a drop-in replacement for Rust's standard `HashMap` @@ -40,44 +40,44 @@ Compared to the previous implementation of `std::collections::HashMap` (Rust 1.3 With the hashbrown default AHash hasher: -| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | -|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| -| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 | -| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 | -| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 | -| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 | -| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 | -| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 | -| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 | -| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 | -| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 | -| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 | -| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 | -| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 | -| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 | -| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 | -| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 | +| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | +| :-------------------------- | :----------------: | ----------------: | :----------: | ------: | ------- | +| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 | +| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 | +| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 | +| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 | +| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 | +| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 | +| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 | +| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 | +| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 
| +| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 | +| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 | +| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 | +| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 | +| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 | +| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 | With the libstd default SipHash hasher: -|name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | -|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| -|insert_std_highbits |19,216 |16,885 | -2,331 | -12.13% | x 1.14 | -|insert_std_random |19,179 |17,034 | -2,145 | -11.18% | x 1.13 | -|insert_std_serial |19,462 |17,493 | -1,969 | -10.12% | x 1.11 | -|insert_erase_std_highbits |50,825 |35,847 | -14,978 | -29.47% | x 1.42 | -|insert_erase_std_random |51,448 |35,392 | -16,056 | -31.21% | x 1.45 | -|insert_erase_std_serial |87,711 |38,091 | -49,620 | -56.57% | x 2.30 | -|iter_std_highbits |1,378 |1,159 | -219 | -15.89% | x 1.19 | -|iter_std_random |1,395 |1,132 | -263 | -18.85% | x 1.23 | -|iter_std_serial |1,704 |1,105 | -599 | -35.15% | x 1.54 | -|lookup_std_highbits |17,195 |13,642 | -3,553 | -20.66% | x 1.26 | -|lookup_std_random |17,181 |13,773 | -3,408 | -19.84% | x 1.25 | -|lookup_std_serial |15,483 |13,651 | -1,832 | -11.83% | x 1.13 | -|lookup_fail_std_highbits |20,926 |13,474 | -7,452 | -35.61% | x 1.55 | -|lookup_fail_std_random |21,766 |13,505 | -8,261 | -37.95% | x 1.61 | -|lookup_fail_std_serial |19,336 |13,519 | -5,817 | -30.08% | x 1.43 | +| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | +| :------------------------ | :----------------: | ----------------: | :----------: | ------: | ------- | +| insert_std_highbits | 19,216 | 16,885 | -2,331 | -12.13% | x 1.14 | +| insert_std_random | 19,179 | 17,034 | -2,145 | -11.18% | x 1.13 | +| insert_std_serial | 19,462 | 17,493 | -1,969 | -10.12% | x 1.11 | +| insert_erase_std_highbits | 50,825 | 35,847 | -14,978 | -29.47% | x 1.42 | +| insert_erase_std_random | 51,448 | 35,392 | -16,056 | -31.21% | x 1.45 | +| insert_erase_std_serial | 87,711 | 38,091 | -49,620 | -56.57% | x 2.30 | +| iter_std_highbits | 1,378 | 1,159 | -219 | -15.89% | x 1.19 | +| iter_std_random | 1,395 | 1,132 | -263 | -18.85% | x 1.23 | +| iter_std_serial | 1,704 | 1,105 | -599 | -35.15% | x 1.54 | +| lookup_std_highbits | 17,195 | 13,642 | -3,553 | -20.66% | x 1.26 | +| lookup_std_random | 17,181 | 13,773 | -3,408 | -19.84% | x 1.25 | +| lookup_std_serial | 15,483 | 13,651 | -1,832 | -11.83% | x 1.13 | +| lookup_fail_std_highbits | 20,926 | 13,474 | -7,452 | -35.61% | x 1.55 | +| lookup_fail_std_random | 21,766 | 13,505 | -8,261 | -37.95% | x 1.61 | +| lookup_fail_std_serial | 19,336 | 13,519 | -5,817 | -30.08% | x 1.43 | ## Usage @@ -85,7 +85,7 @@ Add this to your `Cargo.toml`: ```toml [dependencies] -hashbrown = "0.12" +hashbrown = "0.14" ``` Then: @@ -101,14 +101,13 @@ This crate has the following Cargo features: - `nightly`: Enables nightly-only features including: `#[may_dangle]`. - `serde`: Enables serde serialization support. +- `rkyv`: Enables rkyv serialization support. - `rayon`: Enables rayon parallel iterator support. - `raw`: Enables access to the experimental and unsafe `RawTable` API. 
- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost of compilation time. (enabled by default) -- `bumpalo`: Provides a `BumpWrapper` type which allows `bumpalo` to be used for memory allocation. - `ahash`: Compiles with ahash as default hasher. (enabled by default) -- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash. For targets with no random number generator -this pre-generates seeds at compile time and embeds them as constants. See [aHash's documentation](https://github.com/tkaitchuck/aHash#flags) (disabled by default) +- `allocator-api2`: Enables support for allocators that support `allocator-api2`. (enabled by default) ## License diff --git a/vendor/hashbrown/benches/bench.rs b/vendor/hashbrown/benches/bench.rs index c393b9a..346bd7e 100644 --- a/vendor/hashbrown/benches/bench.rs +++ b/vendor/hashbrown/benches/bench.rs @@ -311,7 +311,7 @@ fn rehash_in_place(b: &mut Bencher) { // Each loop triggers one rehash for _ in 0..10 { - for i in 0..224 { + for i in 0..223 { set.insert(i); } diff --git a/vendor/hashbrown/src/external_trait_impls/mod.rs b/vendor/hashbrown/src/external_trait_impls/mod.rs index ef49783..01d386b 100644 --- a/vendor/hashbrown/src/external_trait_impls/mod.rs +++ b/vendor/hashbrown/src/external_trait_impls/mod.rs @@ -1,4 +1,6 @@ #[cfg(feature = "rayon")] pub(crate) mod rayon; +#[cfg(feature = "rkyv")] +mod rkyv; #[cfg(feature = "serde")] mod serde; diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/map.rs b/vendor/hashbrown/src/external_trait_impls/rayon/map.rs index 14d91c2..2534dc9 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/map.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/map.rs @@ -232,11 +232,11 @@ impl fmt::Debug for ParValuesMut<'_, K, V> { /// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter /// [`HashMap`]: /hashbrown/struct.HashMap.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { +pub struct IntoParIter { inner: RawIntoParIter<(K, V), A>, } -impl ParallelIterator for IntoParIter { +impl ParallelIterator for IntoParIter { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -248,9 +248,7 @@ impl ParallelIterator for IntoPar } } -impl fmt::Debug - for IntoParIter -{ +impl fmt::Debug for IntoParIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -267,11 +265,11 @@ impl fmt::Debug /// /// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain /// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> { +pub struct ParDrain<'a, K, V, A: Allocator = Global> { inner: RawParDrain<'a, (K, V), A>, } -impl ParallelIterator for ParDrain<'_, K, V, A> { +impl ParallelIterator for ParDrain<'_, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -283,9 +281,7 @@ impl ParallelIterator for ParDrai } } -impl fmt::Debug - for ParDrain<'_, K, V, A> -{ +impl fmt::Debug for ParDrain<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -295,7 +291,7 @@ impl fmt::Debug } } -impl HashMap { +impl HashMap { /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. 
#[cfg_attr(feature = "inline-more", inline)] pub fn par_keys(&self) -> ParKeys<'_, K, V> { @@ -315,7 +311,7 @@ impl HashMap { } } -impl HashMap { +impl HashMap { /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. #[cfg_attr(feature = "inline-more", inline)] pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { @@ -340,7 +336,7 @@ where K: Eq + Hash + Sync, V: PartialEq + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { /// Returns `true` if the map is equal to another, /// i.e. both maps contain the same keys mapped to the same values. @@ -354,9 +350,7 @@ where } } -impl IntoParallelIterator - for HashMap -{ +impl IntoParallelIterator for HashMap { type Item = (K, V); type Iter = IntoParIter; @@ -368,9 +362,7 @@ impl IntoParallelIterator } } -impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator - for &'a HashMap -{ +impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap { type Item = (&'a K, &'a V); type Iter = ParIter<'a, K, V>; @@ -383,9 +375,7 @@ impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator } } -impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator - for &'a mut HashMap -{ +impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap { type Item = (&'a K, &'a mut V); type Iter = ParIterMut<'a, K, V>; @@ -424,7 +414,7 @@ where K: Eq + Hash + Send, V: Send, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn par_extend(&mut self, par_iter: I) where @@ -440,7 +430,7 @@ where K: Copy + Eq + Hash + Sync, V: Copy + Sync, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn par_extend(&mut self, par_iter: I) where @@ -456,7 +446,7 @@ where K: Eq + Hash, S: BuildHasher, I: IntoParallelIterator, - A: Allocator + Clone, + A: Allocator, HashMap: Extend, { let (list, len) = super::helpers::collect(par_iter); @@ -561,10 +551,7 @@ mod test_par_map { assert_eq!(value.load(Ordering::Relaxed), 100); // retain only half - let _v: Vec<_> = hm - .into_par_iter() - .filter(|&(ref key, _)| key.k < 50) - .collect(); + let _v: Vec<_> = hm.into_par_iter().filter(|(key, _)| key.k < 50).collect(); assert_eq!(key.load(Ordering::Relaxed), 50); assert_eq!(value.load(Ordering::Relaxed), 50); @@ -611,7 +598,7 @@ mod test_par_map { assert_eq!(value.load(Ordering::Relaxed), 100); // retain only half - let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect(); + let _v: Vec<_> = hm.drain().filter(|(key, _)| key.k < 50).collect(); assert!(hm.is_empty()); assert_eq!(key.load(Ordering::Relaxed), 50); diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs b/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs index 99337a1..61ca69b 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs @@ -2,3 +2,4 @@ mod helpers; pub(crate) mod map; pub(crate) mod raw; pub(crate) mod set; +pub(crate) mod table; diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs b/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs index 883303e..612be47 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs @@ -1,7 +1,6 @@ use crate::raw::Bucket; use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable}; use crate::scopeguard::guard; -use alloc::alloc::dealloc; use core::marker::PhantomData; use core::mem; use core::ptr::NonNull; @@ -76,18 +75,18 
@@ impl UnindexedProducer for ParIterProducer { } /// Parallel iterator which consumes a table and returns elements. -pub struct RawIntoParIter { +pub struct RawIntoParIter { table: RawTable, } -impl RawIntoParIter { +impl RawIntoParIter { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { self.table.par_iter() } } -impl ParallelIterator for RawIntoParIter { +impl ParallelIterator for RawIntoParIter { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -97,9 +96,9 @@ impl ParallelIterator for RawIntoParIter ParallelIterator for RawIntoParIter { +pub struct RawParDrain<'a, T, A: Allocator = Global> { // We don't use a &'a mut RawTable because we want RawParDrain to be // covariant over T. table: NonNull>, marker: PhantomData<&'a RawTable>, } -unsafe impl Send for RawParDrain<'_, T, A> {} +unsafe impl Send for RawParDrain<'_, T, A> {} -impl RawParDrain<'_, T, A> { +impl RawParDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter { self.table.as_ref().par_iter() } } -impl ParallelIterator for RawParDrain<'_, T, A> { +impl ParallelIterator for RawParDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -143,7 +142,7 @@ impl ParallelIterator for RawParDrain<'_, T, A> { } } -impl Drop for RawParDrain<'_, T, A> { +impl Drop for RawParDrain<'_, T, A> { fn drop(&mut self) { // If drive_unindexed is not called then simply clear the table. unsafe { @@ -204,7 +203,7 @@ impl Drop for ParDrainProducer { } } -impl RawTable { +impl RawTable { /// Returns a parallel iterator over the elements in a `RawTable`. #[cfg_attr(feature = "inline-more", inline)] pub unsafe fn par_iter(&self) -> RawParIter { diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/set.rs b/vendor/hashbrown/src/external_trait_impls/rayon/set.rs index ee4f6e6..3de98fc 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/set.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/set.rs @@ -16,11 +16,11 @@ use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, Pa /// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter /// [`HashSet`]: /hashbrown/struct.HashSet.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter { +pub struct IntoParIter { inner: map::IntoParIter, } -impl ParallelIterator for IntoParIter { +impl ParallelIterator for IntoParIter { type Item = T; fn drive_unindexed(self, consumer: C) -> C::Result @@ -38,11 +38,11 @@ impl ParallelIterator for IntoParIter { +pub struct ParDrain<'a, T, A: Allocator = Global> { inner: map::ParDrain<'a, T, (), A>, } -impl ParallelIterator for ParDrain<'_, T, A> { +impl ParallelIterator for ParDrain<'_, T, A> { type Item = T; fn drive_unindexed(self, consumer: C) -> C::Result @@ -85,7 +85,7 @@ impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { /// /// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParDifference<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -94,7 +94,7 @@ impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -118,7 +118,7 @@ where /// /// [`par_symmetric_difference`]: 
/hashbrown/struct.HashSet.html#method.par_symmetric_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -127,7 +127,7 @@ impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -150,7 +150,7 @@ where /// /// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParIntersection<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -159,7 +159,7 @@ impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -181,7 +181,7 @@ where /// /// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParUnion<'a, T, S, A: Allocator = Global> { a: &'a HashSet, b: &'a HashSet, } @@ -190,7 +190,7 @@ impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -216,7 +216,7 @@ impl HashSet where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { /// Visits (potentially in parallel) the values representing the union, /// i.e. all the values in `self` or `other`, without duplicates. @@ -289,7 +289,7 @@ where impl HashSet where T: Eq + Hash + Send, - A: Allocator + Clone + Send, + A: Allocator + Send, { /// Consumes (potentially in parallel) all values in an arbitrary order, /// while preserving the set's allocated memory for reuse. @@ -301,7 +301,7 @@ where } } -impl IntoParallelIterator for HashSet { +impl IntoParallelIterator for HashSet { type Item = T; type Iter = IntoParIter; @@ -313,7 +313,7 @@ impl IntoParallelIterator for HashSet IntoParallelIterator for &'a HashSet { +impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet { type Item = &'a T; type Iter = ParIter<'a, T>; @@ -374,7 +374,7 @@ fn extend(set: &mut HashSet, par_iter: I) where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, I: IntoParallelIterator, HashSet: Extend, { diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/table.rs b/vendor/hashbrown/src/external_trait_impls/rayon/table.rs new file mode 100644 index 0000000..e8e5094 --- /dev/null +++ b/vendor/hashbrown/src/external_trait_impls/rayon/table.rs @@ -0,0 +1,252 @@ +//! Rayon extensions for `HashTable`. + +use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; +use crate::hash_table::HashTable; +use crate::raw::{Allocator, Global}; +use core::fmt; +use core::marker::PhantomData; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +/// Parallel iterator over shared references to entries in a map. +/// +/// This iterator is created by the [`par_iter`] method on [`HashTable`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. 
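Aside from swapping every `A: Allocator + Clone` bound for the plain `A: Allocator`, the set.rs hunks above leave the parallel set-operation API unchanged. A minimal usage sketch, assuming hashbrown is built with its `rayon` feature (the function name is illustrative, not from the patch):

    // Sketch only: parallel set operations yield borrowed elements.
    use hashbrown::HashSet;
    use rayon::prelude::*;

    fn shared_elements(a: &HashSet<u32>, b: &HashSet<u32>) -> Vec<u32> {
        // par_intersection visits the elements of `a` that are also in `b`.
        a.par_intersection(b).copied().collect()
    }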
+///
+/// [`par_iter`]: /hashbrown/struct.HashTable.html#method.par_iter
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
+pub struct ParIter<'a, T> {
+    inner: RawParIter<T>,
+    marker: PhantomData<&'a T>,
+}
+
+impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
+    type Item = &'a T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+    where
+        C: UnindexedConsumer<Self::Item>,
+    {
+        self.inner
+            .map(|x| unsafe { x.as_ref() })
+            .drive_unindexed(consumer)
+    }
+}
+
+impl<T> Clone for ParIter<'_, T> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Self {
+        Self {
+            inner: self.inner.clone(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let iter = unsafe { self.inner.iter() }.map(|x| unsafe { x.as_ref() });
+        f.debug_list().entries(iter).finish()
+    }
+}
+
+/// Parallel iterator over mutable references to entries in a map.
+///
+/// This iterator is created by the [`par_iter_mut`] method on [`HashTable`]
+/// (provided by the [`IntoParallelRefMutIterator`] trait).
+/// See its documentation for more.
+///
+/// [`par_iter_mut`]: /hashbrown/struct.HashTable.html#method.par_iter_mut
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
+pub struct ParIterMut<'a, T> {
+    inner: RawParIter<T>,
+    marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T: Send> ParallelIterator for ParIterMut<'a, T> {
+    type Item = &'a mut T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+    where
+        C: UnindexedConsumer<Self::Item>,
+    {
+        self.inner
+            .map(|x| unsafe { x.as_mut() })
+            .drive_unindexed(consumer)
+    }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ParIterMut<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ParIter {
+            inner: self.inner.clone(),
+            marker: PhantomData,
+        }
+        .fmt(f)
+    }
+}
+
+/// Parallel iterator over entries of a consumed map.
+///
+/// This iterator is created by the [`into_par_iter`] method on [`HashTable`]
+/// (provided by the [`IntoParallelIterator`] trait).
+/// See its documentation for more.
+///
+/// [`into_par_iter`]: /hashbrown/struct.HashTable.html#method.into_par_iter
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
+pub struct IntoParIter<T, A: Allocator = Global> {
+    inner: RawIntoParIter<T, A>,
+}
+
+impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> {
+    type Item = T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn drive_unindexed<C>(self, consumer: C) -> C::Result
+    where
+        C: UnindexedConsumer<Self::Item>,
+    {
+        self.inner.drive_unindexed(consumer)
+    }
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<T, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ParIter {
+            inner: unsafe { self.inner.par_iter() },
+            marker: PhantomData,
+        }
+        .fmt(f)
+    }
+}
+
+/// Parallel draining iterator over entries of a map.
+///
+/// This iterator is created by the [`par_drain`] method on [`HashTable`].
+/// See its documentation for more.
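`HashTable`, the type this new table.rs module serves, stores no hasher of its own, so parallel use differs slightly from `HashMap`: the caller supplies the hashes. A sketch assuming the `rayon` feature is enabled; the choice of `RandomState` here is illustrative:

    use std::collections::hash_map::RandomState;
    use std::hash::BuildHasher;

    use hashbrown::HashTable;
    use rayon::prelude::*;

    fn parallel_sum() -> u64 {
        let s = RandomState::new();
        let mut table: HashTable<u64> = HashTable::new();
        for i in 0..100u64 {
            // insert_unique takes the precomputed hash plus a re-hasher
            // that is invoked when the table grows.
            table.insert_unique(s.hash_one(i), i, |v| s.hash_one(*v));
        }
        // &HashTable implements IntoParallelIterator, yielding &T.
        table.par_iter().map(|v| *v).sum()
    }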
+/// +/// [`par_drain`]: /hashbrown/struct.HashTable.html#method.par_drain +/// [`HashTable`]: /hashbrown/struct.HashTable.html +pub struct ParDrain<'a, T, A: Allocator = Global> { + inner: RawParDrain<'a, T, A>, +} + +impl ParallelIterator for ParDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParDrain<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +impl HashTable { + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the map's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, T, A> { + ParDrain { + inner: self.raw.par_drain(), + } + } +} + +impl IntoParallelIterator for HashTable { + type Item = T; + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + inner: self.raw.into_par_iter(), + } + } +} + +impl<'a, T: Sync, A: Allocator> IntoParallelIterator for &'a HashTable { + type Item = &'a T; + type Iter = ParIter<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { + inner: unsafe { self.raw.par_iter() }, + marker: PhantomData, + } + } +} + +impl<'a, T: Send, A: Allocator> IntoParallelIterator for &'a mut HashTable { + type Item = &'a mut T; + type Iter = ParIterMut<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + inner: unsafe { self.raw.par_iter() }, + marker: PhantomData, + } + } +} + +#[cfg(test)] +mod test_par_table { + use alloc::vec::Vec; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::{ + hash_map::{make_hash, DefaultHashBuilder}, + hash_table::HashTable, + }; + + #[test] + fn test_iterate() { + let hasher = DefaultHashBuilder::default(); + let mut a = HashTable::new(); + for i in 0..32 { + a.insert_unique(make_hash(&hasher, &i), i, |x| make_hash(&hasher, x)); + } + let observed = AtomicUsize::new(0); + a.par_iter().for_each(|k| { + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_move_iter() { + let hasher = DefaultHashBuilder::default(); + let hs = { + let mut hs = HashTable::new(); + + hs.insert_unique(make_hash(&hasher, &'a'), 'a', |x| make_hash(&hasher, x)); + hs.insert_unique(make_hash(&hasher, &'b'), 'b', |x| make_hash(&hasher, x)); + + hs + }; + + let v = hs.into_par_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } +} diff --git a/vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs new file mode 100644 index 0000000..fae7f76 --- /dev/null +++ b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs @@ -0,0 +1,125 @@ +use crate::HashMap; +use core::{ + borrow::Borrow, + hash::{BuildHasher, Hash}, +}; +use rkyv::{ + collections::hash_map::{ArchivedHashMap, HashMapResolver}, + ser::{ScratchSpace, Serializer}, + Archive, Deserialize, Fallible, Serialize, +}; + +impl Archive for HashMap +where + K::Archived: Hash + Eq, +{ + type Archived = ArchivedHashMap; + type Resolver = HashMapResolver; + + #[inline] + unsafe fn resolve(&self, 
pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) { + ArchivedHashMap::resolve_from_len(self.len(), pos, resolver, out); + } +} + +impl Serialize for HashMap +where + K: Serialize + Hash + Eq, + K::Archived: Hash + Eq, + V: Serialize, + S: Serializer + ScratchSpace + ?Sized, +{ + #[inline] + fn serialize(&self, serializer: &mut S) -> Result { + unsafe { ArchivedHashMap::serialize_from_iter(self.iter(), serializer) } + } +} + +impl + Deserialize, D> for ArchivedHashMap +where + K::Archived: Deserialize + Hash + Eq, + V::Archived: Deserialize, +{ + #[inline] + fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { + let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default()); + for (k, v) in self.iter() { + result.insert(k.deserialize(deserializer)?, v.deserialize(deserializer)?); + } + Ok(result) + } +} + +impl, V, AK: Hash + Eq, AV: PartialEq, S: BuildHasher> + PartialEq> for ArchivedHashMap +{ + #[inline] + fn eq(&self, other: &HashMap) -> bool { + if self.len() != other.len() { + false + } else { + self.iter() + .all(|(key, value)| other.get(key).map_or(false, |v| value.eq(v))) + } + } +} + +impl, V, AK: Hash + Eq, AV: PartialEq> + PartialEq> for HashMap +{ + #[inline] + fn eq(&self, other: &ArchivedHashMap) -> bool { + other.eq(self) + } +} + +#[cfg(test)] +mod tests { + use crate::HashMap; + use alloc::string::String; + use rkyv::{ + archived_root, check_archived_root, + ser::{serializers::AllocSerializer, Serializer}, + Deserialize, Infallible, + }; + + #[test] + fn index_map() { + let mut value = HashMap::new(); + value.insert(String::from("foo"), 10); + value.insert(String::from("bar"), 20); + value.insert(String::from("baz"), 40); + value.insert(String::from("bat"), 80); + + let mut serializer = AllocSerializer::<4096>::default(); + serializer.serialize_value(&value).unwrap(); + let result = serializer.into_serializer().into_inner(); + let archived = unsafe { archived_root::>(result.as_ref()) }; + + assert_eq!(value.len(), archived.len()); + for (k, v) in value.iter() { + let (ak, av) = archived.get_key_value(k.as_str()).unwrap(); + assert_eq!(k, ak); + assert_eq!(v, av); + } + + let deserialized: HashMap = archived.deserialize(&mut Infallible).unwrap(); + assert_eq!(value, deserialized); + } + + #[test] + fn validate_index_map() { + let mut value = HashMap::new(); + value.insert(String::from("foo"), 10); + value.insert(String::from("bar"), 20); + value.insert(String::from("baz"), 40); + value.insert(String::from("bat"), 80); + + let mut serializer = AllocSerializer::<4096>::default(); + serializer.serialize_value(&value).unwrap(); + let result = serializer.into_serializer().into_inner(); + check_archived_root::>(result.as_ref()) + .expect("failed to validate archived index map"); + } +} diff --git a/vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs new file mode 100644 index 0000000..c8a69cf --- /dev/null +++ b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs @@ -0,0 +1,123 @@ +use crate::HashSet; +use core::{ + borrow::Borrow, + hash::{BuildHasher, Hash}, +}; +use rkyv::{ + collections::hash_set::{ArchivedHashSet, HashSetResolver}, + ser::{ScratchSpace, Serializer}, + Archive, Deserialize, Fallible, Serialize, +}; + +impl Archive for HashSet +where + K::Archived: Hash + Eq, +{ + type Archived = ArchivedHashSet; + type Resolver = HashSetResolver; + + #[inline] + unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) { + 
ArchivedHashSet::::resolve_from_len(self.len(), pos, resolver, out); + } +} + +impl Serialize for HashSet +where + K::Archived: Hash + Eq, + K: Serialize + Hash + Eq, + S: ScratchSpace + Serializer + ?Sized, +{ + #[inline] + fn serialize(&self, serializer: &mut S) -> Result { + unsafe { ArchivedHashSet::serialize_from_iter(self.iter(), serializer) } + } +} + +impl Deserialize, D> for ArchivedHashSet +where + K: Archive + Hash + Eq, + K::Archived: Deserialize + Hash + Eq, + D: Fallible + ?Sized, + S: Default + BuildHasher, +{ + #[inline] + fn deserialize(&self, deserializer: &mut D) -> Result, D::Error> { + let mut result = HashSet::with_hasher(S::default()); + for k in self.iter() { + result.insert(k.deserialize(deserializer)?); + } + Ok(result) + } +} + +impl, AK: Hash + Eq, S: BuildHasher> PartialEq> + for ArchivedHashSet +{ + #[inline] + fn eq(&self, other: &HashSet) -> bool { + if self.len() != other.len() { + false + } else { + self.iter().all(|key| other.get(key).is_some()) + } + } +} + +impl, AK: Hash + Eq, S: BuildHasher> PartialEq> + for HashSet +{ + #[inline] + fn eq(&self, other: &ArchivedHashSet) -> bool { + other.eq(self) + } +} + +#[cfg(test)] +mod tests { + use crate::HashSet; + use alloc::string::String; + use rkyv::{ + archived_root, check_archived_root, + ser::{serializers::AllocSerializer, Serializer}, + Deserialize, Infallible, + }; + + #[test] + fn index_set() { + let mut value = HashSet::new(); + value.insert(String::from("foo")); + value.insert(String::from("bar")); + value.insert(String::from("baz")); + value.insert(String::from("bat")); + + let mut serializer = AllocSerializer::<4096>::default(); + serializer.serialize_value(&value).unwrap(); + let result = serializer.into_serializer().into_inner(); + let archived = unsafe { archived_root::>(result.as_ref()) }; + + assert_eq!(value.len(), archived.len()); + for k in value.iter() { + let ak = archived.get(k.as_str()).unwrap(); + assert_eq!(k, ak); + } + + let deserialized: HashSet = archived.deserialize(&mut Infallible).unwrap(); + assert_eq!(value, deserialized); + } + + #[test] + fn validate_index_set() { + let mut value = HashSet::new(); + value.insert(String::from("foo")); + value.insert(String::from("bar")); + value.insert(String::from("baz")); + value.insert(String::from("bat")); + + let mut serializer = AllocSerializer::<4096>::default(); + serializer.serialize_value(&value).unwrap(); + let result = serializer.into_serializer().into_inner(); + check_archived_root::>(result.as_ref()) + .expect("failed to validate archived index set"); + } +} diff --git a/vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs b/vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs new file mode 100644 index 0000000..2bde6a0 --- /dev/null +++ b/vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs @@ -0,0 +1,2 @@ +mod hash_map; +mod hash_set; diff --git a/vendor/hashbrown/src/external_trait_impls/serde.rs b/vendor/hashbrown/src/external_trait_impls/serde.rs index 4d62dee..0a76dbe 100644 --- a/vendor/hashbrown/src/external_trait_impls/serde.rs +++ b/vendor/hashbrown/src/external_trait_impls/serde.rs @@ -11,6 +11,7 @@ mod size_hint { } mod map { + use crate::raw::Allocator; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; @@ -21,11 +22,12 @@ mod map { use super::size_hint; - impl Serialize for HashMap + impl Serialize for HashMap where K: Serialize + Eq + Hash, V: Serialize, H: BuildHasher, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize(&self, serializer: S) -> Result 
@@ -36,40 +38,46 @@ mod map { } } - impl<'de, K, V, S> Deserialize<'de> for HashMap + impl<'de, K, V, S, A> Deserialize<'de> for HashMap where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, + A: Allocator + Default, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct MapVisitor { - marker: PhantomData>, + struct MapVisitor + where + A: Allocator, + { + marker: PhantomData>, } - impl<'de, K, V, S> Visitor<'de> for MapVisitor + impl<'de, K, V, S, A> Visitor<'de> for MapVisitor where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, + A: Allocator + Default, { - type Value = HashMap; + type Value = HashMap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") } #[cfg_attr(feature = "inline-more", inline)] - fn visit_map(self, mut map: A) -> Result + fn visit_map(self, mut map: M) -> Result where - A: MapAccess<'de>, + M: MapAccess<'de>, { - let mut values = HashMap::with_capacity_and_hasher( + let mut values = HashMap::with_capacity_and_hasher_in( size_hint::cautious(map.size_hint()), S::default(), + A::default(), ); while let Some((key, value)) = map.next_entry()? { @@ -89,6 +97,7 @@ mod map { } mod set { + use crate::raw::Allocator; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; @@ -99,10 +108,11 @@ mod set { use super::size_hint; - impl Serialize for HashSet + impl Serialize for HashSet where T: Serialize + Eq + Hash, H: BuildHasher, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize(&self, serializer: S) -> Result @@ -113,38 +123,44 @@ mod set { } } - impl<'de, T, S> Deserialize<'de> for HashSet + impl<'de, T, S, A> Deserialize<'de> for HashSet where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, + A: Allocator + Default, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct SeqVisitor { - marker: PhantomData>, + struct SeqVisitor + where + A: Allocator, + { + marker: PhantomData>, } - impl<'de, T, S> Visitor<'de> for SeqVisitor + impl<'de, T, S, A> Visitor<'de> for SeqVisitor where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, + A: Allocator + Default, { - type Value = HashSet; + type Value = HashSet; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq(self, mut seq: A) -> Result + fn visit_seq(self, mut seq: M) -> Result where - A: SeqAccess<'de>, + M: SeqAccess<'de>, { - let mut values = HashSet::with_capacity_and_hasher( + let mut values = HashSet::with_capacity_and_hasher_in( size_hint::cautious(seq.size_hint()), S::default(), + A::default(), ); while let Some(value) = seq.next_element()? 
{ @@ -166,12 +182,15 @@ mod set { where D: Deserializer<'de>, { - struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet); + struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet) + where + A: Allocator; - impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S> + impl<'a, 'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'a, T, S, A> where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, + A: Allocator, { type Value = (); @@ -180,9 +199,9 @@ mod set { } #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq(self, mut seq: A) -> Result + fn visit_seq(self, mut seq: M) -> Result where - A: SeqAccess<'de>, + M: SeqAccess<'de>, { self.0.clear(); self.0.reserve(size_hint::cautious(seq.size_hint())); diff --git a/vendor/hashbrown/src/lib.rs b/vendor/hashbrown/src/lib.rs index bc1c971..6e9592a 100644 --- a/vendor/hashbrown/src/lib.rs +++ b/vendor/hashbrown/src/lib.rs @@ -20,9 +20,8 @@ extend_one, allocator_api, slice_ptr_get, - nonnull_slice_from_raw_parts, maybe_uninit_array_assume_init, - build_hasher_simple_hash_one + strict_provenance ) )] #![allow( @@ -37,6 +36,7 @@ )] #![warn(missing_docs)] #![warn(rust_2018_idioms)] +#![cfg_attr(feature = "nightly", warn(fuzzy_provenance_casts))] #[cfg(test)] #[macro_use] @@ -81,6 +81,7 @@ mod map; mod rustc_entry; mod scopeguard; mod set; +mod table; pub mod hash_map { //! A hash map implemented with quadratic probing and SIMD lookup. @@ -113,9 +114,63 @@ pub mod hash_set { pub use crate::external_trait_impls::rayon::set::*; } } +pub mod hash_table { + //! A hash table implemented with quadratic probing and SIMD lookup. + pub use crate::table::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash tables. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::table::*; + } +} pub use crate::map::HashMap; pub use crate::set::HashSet; +pub use crate::table::HashTable; + +#[cfg(feature = "equivalent")] +pub use equivalent::Equivalent; + +// This is only used as a fallback when building as part of `std`. +#[cfg(not(feature = "equivalent"))] +/// Key equivalence trait. +/// +/// This trait defines the function used to compare the input value with the +/// map keys (or set values) during a lookup operation such as [`HashMap::get`] +/// or [`HashSet::contains`]. +/// It is provided with a blanket implementation based on the +/// [`Borrow`](core::borrow::Borrow) trait. +/// +/// # Correctness +/// +/// Equivalent values must hash to the same value. +pub trait Equivalent { + /// Checks if this value is equivalent to the given key. + /// + /// Returns `true` if both values are equivalent, and `false` otherwise. + /// + /// # Correctness + /// + /// When this function returns `true`, both `self` and `key` must hash to + /// the same value. + fn equivalent(&self, key: &K) -> bool; +} + +#[cfg(not(feature = "equivalent"))] +impl Equivalent for Q +where + Q: Eq, + K: core::borrow::Borrow, +{ + fn equivalent(&self, key: &K) -> bool { + self == key.borrow() + } +} /// The error type for `try_reserve` methods. #[derive(Clone, PartialEq, Eq, Debug)] @@ -130,21 +185,3 @@ pub enum TryReserveError { layout: alloc::alloc::Layout, }, } - -/// Wrapper around `Bump` which allows it to be used as an allocator for -/// `HashMap`, `HashSet` and `RawTable`. 
-///
-/// `Bump` can be used directly without this wrapper on nightly if you enable
-/// the `allocator-api` feature of the `bumpalo` crate.
-#[cfg(feature = "bumpalo")]
-#[derive(Clone, Copy, Debug)]
-pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump);
-
-#[cfg(feature = "bumpalo")]
-#[test]
-fn test_bumpalo() {
-    use bumpalo::Bump;
-    let bump = Bump::new();
-    let mut map = HashMap::new_in(BumpWrapper(&bump));
-    map.insert(0, 1);
-}
diff --git a/vendor/hashbrown/src/macros.rs b/vendor/hashbrown/src/macros.rs
index f8ef917..eaba6be 100644
--- a/vendor/hashbrown/src/macros.rs
+++ b/vendor/hashbrown/src/macros.rs
@@ -37,7 +37,7 @@ macro_rules! cfg_if {
     // semicolon is all the remaining items
     (@__items ($($not:meta,)*) ; ) => {};
     (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
-        // Emit all items within one block, applying an approprate #[cfg]. The
+        // Emit all items within one block, applying an appropriate #[cfg]. The
         // #[cfg] will require all `$m` matchers specified and must also negate
         // all previous matchers.
        cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* }
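The `Equivalent` trait that lib.rs now exports (above) loosens lookups beyond `Borrow`: any probe type may match a key, as long as the two agree on equality and hash to the same value. A sketch of what that buys, with illustrative types that are not part of the patch:

    use hashbrown::{Equivalent, HashMap};

    #[derive(PartialEq, Eq, Hash)]
    struct OwnedKey(String, String);

    // Borrowed view of OwnedKey. derive(Hash) keeps the hashes equal,
    // since String and &str hash identically, field by field.
    #[derive(Hash)]
    struct RefKey<'a>(&'a str, &'a str);

    impl Equivalent<OwnedKey> for RefKey<'_> {
        fn equivalent(&self, key: &OwnedKey) -> bool {
            self.0 == key.0 && self.1 == key.1
        }
    }

    fn lookup(map: &HashMap<OwnedKey, u32>) -> Option<u32> {
        // No owned Strings are allocated just to probe the map.
        map.get(&RefKey("a", "b")).copied()
    }

`Borrow`-based lookups keep working unchanged through the blanket `impl Equivalent<K> for Q`.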
diff --git a/vendor/hashbrown/src/map.rs b/vendor/hashbrown/src/map.rs
index a5d3ccb..b5e657b 100644
--- a/vendor/hashbrown/src/map.rs
+++ b/vendor/hashbrown/src/map.rs
@@ -1,5 +1,7 @@
-use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable};
-use crate::TryReserveError;
+use crate::raw::{
+    Allocator, Bucket, Global, RawDrain, RawExtractIf, RawIntoIter, RawIter, RawTable,
+};
+use crate::{Equivalent, TryReserveError};
 use core::borrow::Borrow;
 use core::fmt::{self, Debug};
 use core::hash::{BuildHasher, Hash};
@@ -10,7 +12,7 @@ use core::ops::Index;
 
 /// Default hasher for `HashMap`.
 #[cfg(feature = "ahash")]
-pub type DefaultHashBuilder = ahash::RandomState;
+pub type DefaultHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
 
 /// Dummy default hasher for `HashMap`.
 #[cfg(not(feature = "ahash"))]
@@ -185,7 +187,7 @@ pub enum DefaultHashBuilder {}
 /// .iter().cloned().collect();
 /// // use the values stored in map
 /// ```
-pub struct HashMap<K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> {
+pub struct HashMap<K, V, S = DefaultHashBuilder, A: Allocator = Global> {
     pub(crate) hash_builder: S,
     pub(crate) table: RawTable<(K, V), A>,
 }
@@ -209,13 +211,12 @@ impl<K: Clone, V: Clone, S: Clone, A: Allocator + Clone> Clone for HashMap<K, V
 
 /// Ensures that a single closure type across uses of this which, in turn prevents multiple
 /// instances of any functions like RawTable::reserve from being generated
 #[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hasher<K, Q, V, S>(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_
+pub(crate) fn make_hasher<Q, V, S>(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_
 where
-    K: Borrow<Q>,
     Q: Hash,
     S: BuildHasher,
 {
-    move |val| make_hash::<K, Q, S>(hash_builder, &val.0)
+    move |val| make_hash::<Q, S>(hash_builder, &val.0)
 }
 
 /// Ensures that a single closure type across uses of this which, in turn prevents multiple
@@ -223,10 +224,9 @@ where
 #[cfg_attr(feature = "inline-more", inline)]
 fn equivalent_key<Q, K, V>(k: &Q) -> impl Fn(&(K, V)) -> bool + '_
 where
-    K: Borrow<Q>,
-    Q: ?Sized + Eq,
+    Q: ?Sized + Equivalent<K>,
 {
-    move |x| k.eq(x.0.borrow())
+    move |x| k.equivalent(&x.0)
 }
 
 /// Ensures that a single closure type across uses of this which, in turn prevents multiple
@@ -234,17 +234,15 @@ where
 #[cfg_attr(feature = "inline-more", inline)]
 fn equivalent<Q, K>(k: &Q) -> impl Fn(&K) -> bool + '_
 where
-    K: Borrow<Q>,
-    Q: ?Sized + Eq,
+    Q: ?Sized + Equivalent<K>,
 {
-    move |x| k.eq(x.borrow())
+    move |x| k.equivalent(x)
 }
 
 #[cfg(not(feature = "nightly"))]
 #[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+pub(crate) fn make_hash<Q, S>(hash_builder: &S, val: &Q) -> u64
 where
-    K: Borrow<Q>,
     Q: Hash + ?Sized,
     S: BuildHasher,
 {
@@ -256,38 +254,14 @@ where
 
 #[cfg(feature = "nightly")]
 #[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+pub(crate) fn make_hash<Q, S>(hash_builder: &S, val: &Q) -> u64
 where
-    K: Borrow<Q>,
     Q: Hash + ?Sized,
     S: BuildHasher,
 {
     hash_builder.hash_one(val)
 }
 
-#[cfg(not(feature = "nightly"))]
-#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
-where
-    K: Hash,
-    S: BuildHasher,
-{
-    use core::hash::Hasher;
-    let mut state = hash_builder.build_hasher();
-    val.hash(&mut state);
-    state.finish()
-}
-
-#[cfg(feature = "nightly")]
-#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
-where
-    K: Hash,
-    S: BuildHasher,
-{
-    hash_builder.hash_one(val)
-}
-
 #[cfg(feature = "ahash")]
 impl<K, V> HashMap<K, V, DefaultHashBuilder> {
     /// Creates an empty `HashMap`.
@@ -295,6 +269,18 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
     /// The hash map is initially created with a capacity of 0, so it will not allocate until it
     /// is first inserted into.
     ///
+    /// # HashDoS resistance
+    ///
+    /// The `hash_builder` normally use a fixed key by default and that does
+    /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+    /// Users who require HashDoS resistance should explicitly use
+    /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+    /// as the hasher when creating a [`HashMap`], for example with
+    /// [`with_hasher`](HashMap::with_hasher) method.
+    ///
+    /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+    /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+    ///
     /// # Examples
     ///
     /// ```
@@ -313,6 +299,18 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
     /// The hash map will be able to hold at least `capacity` elements without
     /// reallocating. If `capacity` is 0, the hash map will not allocate.
/// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_capacity_and_hasher`](HashMap::with_capacity_and_hasher) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// /// # Examples /// /// ``` @@ -328,11 +326,46 @@ impl HashMap { } #[cfg(feature = "ahash")] -impl HashMap { +impl HashMap { /// Creates an empty `HashMap` using the given allocator. /// /// The hash map is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_hasher_in`](HashMap::with_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use bumpalo::Bump; + /// + /// let bump = Bump::new(); + /// let mut map = HashMap::new_in(&bump); + /// + /// // The created HashMap holds none elements + /// assert_eq!(map.len(), 0); + /// + /// // The created HashMap also doesn't allocate memory + /// assert_eq!(map.capacity(), 0); + /// + /// // Now we insert element inside created HashMap + /// map.insert("One", 1); + /// // We can see that the HashMap holds 1 element + /// assert_eq!(map.len(), 1); + /// // And it also allocates some capacity + /// assert!(map.capacity() > 1); + /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn new_in(alloc: A) -> Self { Self::with_hasher_in(DefaultHashBuilder::default(), alloc) @@ -342,6 +375,46 @@ impl HashMap { /// /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`], for example with + /// [`with_capacity_and_hasher_in`](HashMap::with_capacity_and_hasher_in) method. 
+ /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use bumpalo::Bump; + /// + /// let bump = Bump::new(); + /// let mut map = HashMap::with_capacity_in(5, &bump); + /// + /// // The created HashMap holds none elements + /// assert_eq!(map.len(), 0); + /// // But it can hold at least 5 elements without reallocating + /// let empty_map_capacity = map.capacity(); + /// assert!(empty_map_capacity >= 5); + /// + /// // Now we insert some 5 elements inside created HashMap + /// map.insert("One", 1); + /// map.insert("Two", 2); + /// map.insert("Three", 3); + /// map.insert("Four", 4); + /// map.insert("Five", 5); + /// + /// // We can see that the HashMap holds 5 elements + /// assert_eq!(map.len(), 5); + /// // But its capacity isn't changed + /// assert_eq!(map.capacity(), empty_map_capacity) + /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc) @@ -355,14 +428,21 @@ impl HashMap { /// The hash map is initially created with a capacity of 0, so it will not /// allocate until it is first inserted into. /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for /// the HashMap to be useful, see its documentation for details. /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// /// # Examples /// /// ``` @@ -376,8 +456,6 @@ impl HashMap { /// /// map.insert(1, 2); /// ``` - /// - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub const fn with_hasher(hash_builder: S) -> Self { Self { @@ -392,14 +470,21 @@ impl HashMap { /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. 
+ /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for /// the HashMap to be useful, see its documentation for details. /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// /// # Examples /// /// ``` @@ -413,8 +498,6 @@ impl HashMap { /// /// map.insert(1, 2); /// ``` - /// - /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { Self { @@ -424,7 +507,7 @@ impl HashMap { } } -impl HashMap { +impl HashMap { /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { @@ -434,12 +517,19 @@ impl HashMap { /// Creates an empty `HashMap` which will use the given hash builder to hash /// keys. It will be allocated with the given allocator. /// - /// The created map has the default initial capacity. + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html /// /// # Examples /// @@ -452,7 +542,7 @@ impl HashMap { /// map.insert(1, 2); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn with_hasher_in(hash_builder: S, alloc: A) -> Self { + pub const fn with_hasher_in(hash_builder: S, alloc: A) -> Self { Self { hash_builder, table: RawTable::new_in(alloc), @@ -465,10 +555,16 @@ impl HashMap { /// The hash map will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash map will not allocate. /// - /// Warning: `hash_builder` is normally randomly generated, and - /// is designed to allow HashMaps to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashMap`]. 
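The HashDoS note repeated through these doc hunks is the practical consequence of the `DefaultHashBuilder` change earlier in this file: the default is now a fixed-key `BuildHasherDefault<AHasher>`. A sketch of opting back into a randomly seeded hasher (`RandomState` is one reasonable choice, not the only one):

    use std::collections::hash_map::RandomState;

    use hashbrown::HashMap;

    fn dos_resistant_map() -> HashMap<u64, u64, RandomState> {
        HashMap::with_hasher(RandomState::new())
    }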
+ /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html /// /// # Examples /// @@ -810,14 +906,11 @@ impl HashMap { /// /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); /// assert_eq!(map.len(), 8); - /// let capacity_before_retain = map.capacity(); /// /// map.retain(|&k, _| k % 2 == 0); /// /// // We can see, that the number of elements inside map is changed. /// assert_eq!(map.len(), 4); - /// // But map capacity is equal to old one. - /// assert_eq!(map.capacity(), capacity_before_retain); /// /// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect(); /// vec.sort_unstable(); @@ -844,26 +937,25 @@ impl HashMap { /// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out /// into another iterator. /// - /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of + /// Note that `extract_if` lets you mutate every value in the filter closure, regardless of /// whether you choose to keep or remove it. /// - /// When the returned DrainedFilter is dropped, any remaining elements that satisfy - /// the predicate are dropped from the table. - /// - /// It is unspecified how many more elements will be subjected to the closure - /// if a panic occurs in the closure, or a panic occurs while dropping an element, - /// or if the `DrainFilter` value is leaked. + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. /// /// Keeps the allocated memory for reuse. /// + /// [`retain()`]: HashMap::retain + /// /// # Examples /// /// ``` /// use hashbrown::HashMap; /// /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); - /// let capacity_before_drain_filter = map.capacity(); - /// let drained: HashMap = map.drain_filter(|k, _v| k % 2 == 0).collect(); + /// + /// let drained: HashMap = map.extract_if(|k, _v| k % 2 == 0).collect(); /// /// let mut evens = drained.keys().cloned().collect::>(); /// let mut odds = map.keys().cloned().collect::>(); @@ -872,27 +964,24 @@ impl HashMap { /// /// assert_eq!(evens, vec![0, 2, 4, 6]); /// assert_eq!(odds, vec![1, 3, 5, 7]); - /// // Map capacity is equal to old one. - /// assert_eq!(map.capacity(), capacity_before_drain_filter); /// /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); /// /// { // Iterator is dropped without being consumed. - /// let d = map.drain_filter(|k, _v| k % 2 != 0); + /// let d = map.extract_if(|k, _v| k % 2 != 0); /// } /// - /// // But the map lens have been reduced by half - /// // even if we do not use DrainFilter iterator. - /// assert_eq!(map.len(), 4); + /// // ExtractIf was not exhausted, therefore no elements were drained. 
+ /// assert_eq!(map.len(), 8); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, K, V, F, A> + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, K, V, F, A> where F: FnMut(&K, &mut V) -> bool, { - DrainFilter { + ExtractIf { f, - inner: DrainFilterInner { + inner: RawExtractIf { iter: unsafe { self.table.iter() }, table: &mut self.table, }, @@ -984,7 +1073,7 @@ impl HashMap where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashMap`. The collection may reserve more space to avoid @@ -992,9 +1081,12 @@ where /// /// # Panics /// - /// Panics if the new allocation size overflows [`usize`]. + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashMap::try_reserve) instead + /// if you want to handle memory allocation failure. /// - /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html /// /// # Examples /// @@ -1012,7 +1104,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn reserve(&mut self, additional: usize) { self.table - .reserve(additional, make_hasher::(&self.hash_builder)); + .reserve(additional, make_hasher::<_, V, S>(&self.hash_builder)); } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -1062,7 +1154,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { self.table - .try_reserve(additional, make_hasher::(&self.hash_builder)) + .try_reserve(additional, make_hasher::<_, V, S>(&self.hash_builder)) } /// Shrinks the capacity of the map as much as possible. It will drop @@ -1084,7 +1176,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn shrink_to_fit(&mut self) { self.table - .shrink_to(0, make_hasher::(&self.hash_builder)); + .shrink_to(0, make_hasher::<_, V, S>(&self.hash_builder)); } /// Shrinks the capacity of the map with a lower limit. It will drop @@ -1113,7 +1205,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn shrink_to(&mut self, min_capacity: usize) { self.table - .shrink_to(min_capacity, make_hasher::(&self.hash_builder)); + .shrink_to(min_capacity, make_hasher::<_, V, S>(&self.hash_builder)); } /// Gets the given key's corresponding entry in the map for in-place manipulation. 
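Since `reserve` is now documented to abort the process on allocation failure, code that must survive memory pressure should use the fallible path instead; a minimal sketch:

    use hashbrown::{HashMap, TryReserveError};

    // Propagate allocation failure to the caller instead of aborting.
    fn try_grow(map: &mut HashMap<u64, u64>, additional: usize) -> Result<(), TryReserveError> {
        map.try_reserve(additional)
    }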
@@ -1137,7 +1229,7 @@ where /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> { - let hash = make_insert_hash::(&self.hash_builder, &key); + let hash = make_hash::(&self.hash_builder, &key); if let Some(elem) = self.table.find(hash, equivalent_key(&key)) { Entry::Occupied(OccupiedEntry { hash, @@ -1174,10 +1266,9 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { - let hash = make_hash::(&self.hash_builder, key); + let hash = make_hash::(&self.hash_builder, key); if let Some(elem) = self.table.find(hash, equivalent_key(key)) { EntryRef::Occupied(OccupiedEntryRef { hash, @@ -1216,12 +1307,11 @@ where #[inline] pub fn get(&self, k: &Q) -> Option<&V> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. match self.get_inner(k) { - Some(&(_, ref v)) => Some(v), + Some((_, v)) => Some(v), None => None, } } @@ -1248,12 +1338,11 @@ where #[inline] pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. match self.get_inner(k) { - Some(&(ref key, ref value)) => Some((key, value)), + Some((key, value)) => Some((key, value)), None => None, } } @@ -1261,13 +1350,12 @@ where #[inline] fn get_inner(&self, k: &Q) -> Option<&(K, V)> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { if self.table.is_empty() { None } else { - let hash = make_hash::(&self.hash_builder, k); + let hash = make_hash::(&self.hash_builder, k); self.table.get(hash, equivalent_key(k)) } } @@ -1298,8 +1386,7 @@ where #[inline] pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. match self.get_inner_mut(k) { @@ -1330,8 +1417,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn contains_key(&self, k: &Q) -> bool where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.get_inner(k).is_some() } @@ -1362,8 +1448,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. 
match self.get_inner_mut(k) { @@ -1375,13 +1460,12 @@ where #[inline] fn get_inner_mut(&mut self, k: &Q) -> Option<&mut (K, V)> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { if self.table.is_empty() { None } else { - let hash = make_hash::(&self.hash_builder, k); + let hash = make_hash::(&self.hash_builder, k); self.table.get_mut(hash, equivalent_key(k)) } } @@ -1431,8 +1515,7 @@ where /// ``` pub fn get_many_mut(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v)) } @@ -1487,8 +1570,7 @@ where ks: [&Q; N], ) -> Option<[&'_ mut V; N]> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.get_many_unchecked_mut_inner(ks) .map(|res| res.map(|(_, v)| v)) @@ -1543,8 +1625,7 @@ where ks: [&Q; N], ) -> Option<[(&'_ K, &'_ mut V); N]> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.get_many_mut_inner(ks) .map(|res| res.map(|(k, v)| (&*k, v))) @@ -1599,8 +1680,7 @@ where ks: [&Q; N], ) -> Option<[(&'_ K, &'_ mut V); N]> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.get_many_unchecked_mut_inner(ks) .map(|res| res.map(|(k, v)| (&*k, v))) @@ -1611,12 +1691,11 @@ where ks: [&Q; N], ) -> Option<[&'_ mut (K, V); N]> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { let hashes = self.build_hashes_inner(ks); self.table - .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) + .get_many_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) } unsafe fn get_many_unchecked_mut_inner( @@ -1624,22 +1703,20 @@ where ks: [&Q; N], ) -> Option<[&'_ mut (K, V); N]> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { let hashes = self.build_hashes_inner(ks); self.table - .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) + .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k)) } fn build_hashes_inner(&self, ks: [&Q; N]) -> [u64; N] where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { let mut hashes = [0_u64; N]; for i in 0..N { - hashes[i] = make_hash::(&self.hash_builder, ks[i]); + hashes[i] = make_hash::(&self.hash_builder, ks[i]); } hashes } @@ -1672,13 +1749,19 @@ where /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn insert(&mut self, k: K, v: V) -> Option { - let hash = make_insert_hash::(&self.hash_builder, &k); - if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) { - Some(mem::replace(item, v)) - } else { - self.table - .insert(hash, (k, v), make_hasher::(&self.hash_builder)); - None + let hash = make_hash::(&self.hash_builder, &k); + let hasher = make_hasher::<_, V, S>(&self.hash_builder); + match self + .table + .find_or_find_insert_slot(hash, equivalent_key(&k), hasher) + { + Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().1 }, v)), + Err(slot) => { + unsafe { + self.table.insert_in_slot(hash, slot, (k, v)); + } + None + } } } @@ -1733,10 +1816,10 @@ where /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) { - let hash = make_insert_hash::(&self.hash_builder, &k); + let hash = make_hash::(&self.hash_builder, &k); let bucket = self .table - .insert(hash, (k, v), make_hasher::(&self.hash_builder)); + .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder)); let (k_ref, v_ref) = unsafe { bucket.as_mut() }; (k_ref, v_ref) } @@ -1801,19 +1884,17 @@ where /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.insert(1, "a"); - /// let capacity_before_remove 
= map.capacity(); /// /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); /// - /// // Now map holds none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// // Now map holds none elements + /// assert!(map.is_empty()); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove(&mut self, k: &Q) -> Option where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. match self.remove_entry(k) { @@ -1842,26 +1923,24 @@ where /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.insert(1, "a"); - /// let capacity_before_remove = map.capacity(); /// /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); /// assert_eq!(map.remove(&1), None); /// - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// // Now map hold none elements + /// assert!(map.is_empty()); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> where - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { - let hash = make_hash::(&self.hash_builder, k); + let hash = make_hash::(&self.hash_builder, k); self.table.remove_entry(hash, equivalent_key(k)) } } -impl HashMap { +impl HashMap { /// Creates a raw entry builder for the HashMap. /// /// Raw entries provide the lowest level of control for searching and @@ -2013,19 +2092,31 @@ impl HashMap { RawEntryBuilder { map: self } } + /// Returns a reference to the [`RawTable`] used underneath [`HashMap`]. + /// This function is only available if the `raw` feature of the crate is enabled. + /// + /// See [`raw_table_mut`] for more. + /// + /// [`raw_table_mut`]: Self::raw_table_mut + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_table(&self) -> &RawTable<(K, V), A> { + &self.table + } + /// Returns a mutable reference to the [`RawTable`] used underneath [`HashMap`]. /// This function is only available if the `raw` feature of the crate is enabled. /// /// # Note /// - /// Calling the function safe, but using raw hash table API's may require + /// Calling this function is safe, but using the raw hash table API may require /// unsafe functions or blocks. /// /// `RawTable` API gives the lowest level of control under the map that can be useful /// for extending the HashMap's API, but may lead to *[undefined behavior]*. 
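The rewritten `insert` above performs a single `find_or_find_insert_slot` pass instead of a lookup followed by an independent insert, so the hash and probe sequence are computed once. The raw entry API shown in these hunks exposes the same one-pass shape to callers; a sketch:

    use hashbrown::HashMap;
    use hashbrown::hash_map::RawEntryMut;

    // Hash once, then either update in place or fill the vacant slot.
    fn bump_count(map: &mut HashMap<String, u32>, key: &str) {
        match map.raw_entry_mut().from_key(key) {
            RawEntryMut::Occupied(mut entry) => *entry.get_mut() += 1,
            RawEntryMut::Vacant(entry) => {
                entry.insert(key.to_string(), 1);
            }
        }
    }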
/// /// [`HashMap`]: struct.HashMap.html - /// [`RawTable`]: raw/struct.RawTable.html + /// [`RawTable`]: crate::raw::RawTable /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html /// /// # Examples @@ -2049,9 +2140,9 @@ impl HashMap { /// where /// F: Fn(&(K, V)) -> bool, /// { - /// let raw_table = map.raw_table(); + /// let raw_table = map.raw_table_mut(); /// match raw_table.find(hash, is_match) { - /// Some(bucket) => Some(unsafe { raw_table.remove(bucket) }), + /// Some(bucket) => Some(unsafe { raw_table.remove(bucket).0 }), /// None => None, /// } /// } @@ -2070,7 +2161,7 @@ impl HashMap { /// ``` #[cfg(feature = "raw")] #[cfg_attr(feature = "inline-more", inline)] - pub fn raw_table(&mut self) -> &mut RawTable<(K, V), A> { + pub fn raw_table_mut(&mut self) -> &mut RawTable<(K, V), A> { &mut self.table } } @@ -2080,7 +2171,7 @@ where K: Eq + Hash, V: PartialEq, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -2097,7 +2188,7 @@ where K: Eq + Hash, V: Eq, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -2105,7 +2196,7 @@ impl Debug for HashMap where K: Debug, V: Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() @@ -2115,7 +2206,7 @@ where impl Default for HashMap where S: Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// Creates an empty `HashMap`, with the `Default` value for the hasher and allocator. /// @@ -2140,10 +2231,10 @@ where impl Index<&Q> for HashMap where - K: Eq + Hash + Borrow, - Q: Eq + Hash, + K: Eq + Hash, + Q: Hash + Equivalent, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Output = V; @@ -2174,7 +2265,7 @@ where impl From<[(K, V); N]> for HashMap where K: Eq + Hash, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// # Examples /// @@ -2319,11 +2410,11 @@ impl IterMut<'_, K, V> { /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// ``` -pub struct IntoIter { +pub struct IntoIter { inner: RawIntoIter<(K, V), A>, } -impl IntoIter { +impl IntoIter { /// Returns a iterator of references over the remaining items. 
#[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2363,11 +2454,11 @@ impl IntoIter { /// assert_eq!(keys.next(), None); /// assert_eq!(keys.next(), None); /// ``` -pub struct IntoKeys { +pub struct IntoKeys { inner: IntoIter, } -impl Iterator for IntoKeys { +impl Iterator for IntoKeys { type Item = K; #[inline] @@ -2380,16 +2471,16 @@ impl Iterator for IntoKeys { } } -impl ExactSizeIterator for IntoKeys { +impl ExactSizeIterator for IntoKeys { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoKeys {} +impl FusedIterator for IntoKeys {} -impl fmt::Debug for IntoKeys { +impl fmt::Debug for IntoKeys { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(k, _)| k)) @@ -2425,11 +2516,11 @@ impl fmt::Debug for IntoKeys /// assert_eq!(values.next(), None); /// assert_eq!(values.next(), None); /// ``` -pub struct IntoValues { +pub struct IntoValues { inner: IntoIter, } -impl Iterator for IntoValues { +impl Iterator for IntoValues { type Item = V; #[inline] @@ -2442,16 +2533,16 @@ impl Iterator for IntoValues { } } -impl ExactSizeIterator for IntoValues { +impl ExactSizeIterator for IntoValues { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoValues {} +impl FusedIterator for IntoValues {} -impl fmt::Debug for IntoValues { +impl fmt::Debug for IntoValues { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(_, v)| v)) @@ -2583,11 +2674,11 @@ impl fmt::Debug for Values<'_, K, V> { /// assert_eq!(drain_iter.next(), None); /// assert_eq!(drain_iter.next(), None); /// ``` -pub struct Drain<'a, K, V, A: Allocator + Clone = Global> { +pub struct Drain<'a, K, V, A: Allocator = Global> { inner: RawDrain<'a, (K, V), A>, } -impl Drain<'_, K, V, A> { +impl Drain<'_, K, V, A> { /// Returns a iterator of references over the remaining items. #[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2601,10 +2692,10 @@ impl Drain<'_, K, V, A> { /// A draining iterator over entries of a `HashMap` which don't satisfy the predicate /// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`. /// -/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its +/// This `struct` is created by the [`extract_if`] method on [`HashMap`]. See its /// documentation for more. /// -/// [`drain_filter`]: struct.HashMap.html#method.drain_filter +/// [`extract_if`]: struct.HashMap.html#method.extract_if /// [`HashMap`]: struct.HashMap.html /// /// # Examples @@ -2614,63 +2705,40 @@ impl Drain<'_, K, V, A> { /// /// let mut map: HashMap = [(1, "a"), (2, "b"), (3, "c")].into(); /// -/// let mut drain_filter = map.drain_filter(|k, _v| k % 2 != 0); -/// let mut vec = vec![drain_filter.next(), drain_filter.next()]; +/// let mut extract_if = map.extract_if(|k, _v| k % 2 != 0); +/// let mut vec = vec![extract_if.next(), extract_if.next()]; /// -/// // The `DrainFilter` iterator produces items in arbitrary order, so the +/// // The `ExtractIf` iterator produces items in arbitrary order, so the /// // items must be sorted to test them against a sorted array. 
/// vec.sort_unstable(); /// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]); /// /// // It is fused iterator -/// assert_eq!(drain_filter.next(), None); -/// assert_eq!(drain_filter.next(), None); -/// drop(drain_filter); +/// assert_eq!(extract_if.next(), None); +/// assert_eq!(extract_if.next(), None); +/// drop(extract_if); /// /// assert_eq!(map.len(), 1); /// ``` -pub struct DrainFilter<'a, K, V, F, A: Allocator + Clone = Global> +#[must_use = "Iterators are lazy unless consumed"] +pub struct ExtractIf<'a, K, V, F, A: Allocator = Global> where F: FnMut(&K, &mut V) -> bool, { f: F, - inner: DrainFilterInner<'a, K, V, A>, + inner: RawExtractIf<'a, (K, V), A>, } -impl<'a, K, V, F, A> Drop for DrainFilter<'a, K, V, F, A> +impl Iterator for ExtractIf<'_, K, V, F, A> where F: FnMut(&K, &mut V) -> bool, - A: Allocator + Clone, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - while let Some(item) = self.next() { - let guard = ConsumeAllOnDrop(self); - drop(item); - mem::forget(guard); - } - } -} - -pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T); - -impl Drop for ConsumeAllOnDrop<'_, T> { - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - self.0.for_each(drop); - } -} - -impl Iterator for DrainFilter<'_, K, V, F, A> -where - F: FnMut(&K, &mut V) -> bool, - A: Allocator + Clone, + A: Allocator, { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] fn next(&mut self) -> Option { - self.inner.next(&mut self.f) + self.inner.next(|&mut (ref k, ref mut v)| (self.f)(k, v)) } #[inline] @@ -2679,31 +2747,7 @@ where } } -impl FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} - -/// Portions of `DrainFilter` shared with `set::DrainFilter` -pub(super) struct DrainFilterInner<'a, K, V, A: Allocator + Clone> { - pub iter: RawIter<(K, V)>, - pub table: &'a mut RawTable<(K, V), A>, -} - -impl DrainFilterInner<'_, K, V, A> { - #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn next(&mut self, f: &mut F) -> Option<(K, V)> - where - F: FnMut(&K, &mut V) -> bool, - { - unsafe { - for item in &mut self.iter { - let &mut (ref key, ref mut value) = item.as_mut(); - if f(key, value) { - return Some(self.table.remove(item)); - } - } - } - None - } -} +impl FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} /// A mutable iterator over the values of a `HashMap` in arbitrary order. /// The iterator element type is `&'a mut V`. @@ -2791,7 +2835,7 @@ pub struct ValuesMut<'a, K, V> { /// /// assert_eq!(map.len(), 6); /// ``` -pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator = Global> { map: &'a mut HashMap, } @@ -2879,7 +2923,7 @@ pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { /// vec.sort_unstable(); /// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); /// ``` -pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub enum RawEntryMut<'a, K, V, S, A: Allocator = Global> { /// An occupied entry. 
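Besides the rename from `drain_filter`, the deleted `Drop`/`ConsumeAllOnDrop` machinery above is a behavior change: dropping an unconsumed `ExtractIf` no longer removes the remaining matching entries. A short sketch of both behaviors, assuming the hashbrown 0.14 signature:

```rust
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x * 10)).collect();

    // Consuming the iterator removes exactly the matching entries.
    let mut evens: Vec<_> = map.extract_if(|k, _v| k % 2 == 0).collect();
    evens.sort_unstable();
    assert_eq!(evens, [(0, 0), (2, 20), (4, 40), (6, 60)]);
    assert_eq!(map.len(), 4);

    // Dropping it unconsumed is now a no-op: the old `DrainFilter`
    // removed the remaining matches in its destructor, `ExtractIf` does not.
    drop(map.extract_if(|k, _v| k % 2 != 0));
    assert_eq!(map.len(), 4);
}
```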
/// /// # Examples @@ -2970,7 +3014,7 @@ pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// assert_eq!(map.get(&"b"), None); /// assert_eq!(map.len(), 1); /// ``` -pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator = Global> { elem: Bucket<(K, V)>, table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, @@ -2981,7 +3025,7 @@ where K: Send, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S, A> @@ -2989,7 +3033,7 @@ where K: Sync, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } @@ -3041,7 +3085,7 @@ where /// } /// assert!(map[&"c"] == 30 && map.len() == 3); /// ``` -pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator = Global> { table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, } @@ -3080,11 +3124,11 @@ pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); /// } /// ``` -pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawEntryBuilder<'a, K, V, S, A: Allocator = Global> { map: &'a HashMap, } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given key. /// /// # Examples @@ -3103,10 +3147,9 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S, A> where S: BuildHasher, - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { - let hash = make_hash::(&self.map.hash_builder, k); + let hash = make_hash::(&self.map.hash_builder, k); self.from_key_hashed_nocheck(hash, k) } @@ -3136,14 +3179,13 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { #[allow(clippy::wrong_self_convention)] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A> where - K: Borrow, - Q: Eq, + Q: Equivalent, { self.from_hash(hash, equivalent(k)) } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given hash and matching function. /// /// # Examples @@ -3194,7 +3236,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { /// Access an immutable entry by key. 
/// /// # Examples @@ -3211,10 +3253,9 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { pub fn from_key(self, k: &Q) -> Option<(&'a K, &'a V)> where S: BuildHasher, - K: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { - let hash = make_hash::(&self.map.hash_builder, k); + let hash = make_hash::(&self.map.hash_builder, k); self.from_key_hashed_nocheck(hash, k) } @@ -3242,8 +3283,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { #[allow(clippy::wrong_self_convention)] pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)> where - K: Borrow, - Q: Eq, + Q: Equivalent, { self.from_hash(hash, equivalent(k)) } @@ -3254,7 +3294,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { F: FnMut(&K) -> bool, { match self.map.table.get(hash, |(k, _)| is_match(k)) { - Some(&(ref key, ref value)) => Some((key, value)), + Some((key, value)) => Some((key, value)), None => None, } } @@ -3289,7 +3329,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryMut<'a, K, V, S, A> { /// Sets the value of the entry, and returns a RawOccupiedEntryMut. /// /// # Examples @@ -3483,7 +3523,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawOccupiedEntryMut<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -3650,7 +3690,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn get_key_value(&self) -> (&K, &V) { unsafe { - let &(ref key, ref value) = self.elem.as_ref(); + let (key, value) = self.elem.as_ref(); (key, value) } } @@ -3822,7 +3862,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.remove(self.elem) } + unsafe { self.table.remove(self.elem).0 } } /// Provides shared access to the key and owned access to the value of @@ -3882,7 +3922,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> { /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. 
/// @@ -3906,7 +3946,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { K: Hash, S: BuildHasher, { - let hash = make_insert_hash::(self.hash_builder, &key); + let hash = make_hash::(self.hash_builder, &key); self.insert_hashed_nocheck(hash, key, value) } @@ -3950,7 +3990,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { let &mut (ref mut k, ref mut v) = self.table.insert_entry( hash, (key, value), - make_hasher::(self.hash_builder), + make_hasher::<_, V, S>(self.hash_builder), ); (k, v) } @@ -4014,11 +4054,11 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { K: Hash, S: BuildHasher, { - let hash = make_insert_hash::(self.hash_builder, &key); + let hash = make_hash::(self.hash_builder, &key); let elem = self.table.insert( hash, (key, value), - make_hasher::(self.hash_builder), + make_hasher::<_, V, S>(self.hash_builder), ); RawOccupiedEntryMut { elem, @@ -4028,13 +4068,13 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { } } -impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { +impl Debug for RawEntryBuilderMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } } -impl Debug for RawEntryMut<'_, K, V, S, A> { +impl Debug for RawEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), @@ -4043,7 +4083,7 @@ impl Debug for RawEntryMut<'_, K, V } } -impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { +impl Debug for RawOccupiedEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawOccupiedEntryMut") .field("key", self.key()) @@ -4052,13 +4092,13 @@ impl Debug for RawOccupiedEntryMut< } } -impl Debug for RawVacantEntryMut<'_, K, V, S, A> { +impl Debug for RawVacantEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawVacantEntryMut").finish() } } -impl Debug for RawEntryBuilder<'_, K, V, S, A> { +impl Debug for RawEntryBuilder<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } @@ -4109,7 +4149,7 @@ impl Debug for RawEntryBuilder<'_, K, V, S, A> { /// ``` pub enum Entry<'a, K, V, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
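These raw-entry hunks fold `make_insert_hash` into the single `make_hash` helper. From the caller's side, the point of the raw API is hashing once and reusing the value; a sketch using the standard `BuildHasher::hash_one` (stable since Rust 1.71):

```rust
use core::hash::BuildHasher;
use hashbrown::hash_map::{HashMap, RawEntryMut};

fn main() {
    let mut map: HashMap<&str, u32> = HashMap::new();

    // Hash the key once, then probe and insert without rehashing.
    let hash = map.hasher().hash_one("a");
    match map.raw_entry_mut().from_key_hashed_nocheck(hash, "a") {
        RawEntryMut::Occupied(mut o) => {
            *o.get_mut() += 1;
        }
        RawEntryMut::Vacant(v) => {
            v.insert_hashed_nocheck(hash, "a", 1);
        }
    }
    assert_eq!(map["a"], 1);
}
```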
/// @@ -4142,7 +4182,7 @@ where Vacant(VacantEntry<'a, K, V, S, A>), } -impl Debug for Entry<'_, K, V, S, A> { +impl Debug for Entry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -4191,7 +4231,7 @@ impl Debug for Entry<'_, K, V, S, A /// assert_eq!(map.get(&"c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntry<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { hash: u64, key: Option, elem: Bucket<(K, V)>, @@ -4203,7 +4243,7 @@ where K: Send, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl Sync for OccupiedEntry<'_, K, V, S, A> @@ -4211,11 +4251,11 @@ where K: Sync, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } -impl Debug for OccupiedEntry<'_, K, V, S, A> { +impl Debug for OccupiedEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -4254,13 +4294,13 @@ impl Debug for OccupiedEntry<'_, K, /// } /// assert!(map[&"b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntry<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { hash: u64, key: K, table: &'a mut HashMap, } -impl Debug for VacantEntry<'_, K, V, S, A> { +impl Debug for VacantEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } @@ -4320,7 +4360,7 @@ impl Debug for VacantEntry<'_, K, V, S, A> /// ``` pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
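`OccupiedEntry` and `VacantEntry` gain a defaulted `S = DefaultHashBuilder` here, so entry types can be named in signatures without spelling out the hasher. An illustrative sketch (`bump` is a made-up helper):

```rust
use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry};

// Before this change the parameter list had to include the hasher;
// with the default, key and value types are enough.
fn bump(entry: OccupiedEntry<'_, &'static str, u32>) -> u32 {
    let counter = entry.into_mut();
    *counter += 1;
    *counter
}

fn main() {
    let mut map: HashMap<&'static str, u32> = HashMap::new();
    map.insert("hits", 41);
    if let Entry::Occupied(o) = map.entry("hits") {
        assert_eq!(bump(o), 42);
    }
}
```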
/// @@ -4353,7 +4393,7 @@ where Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>), } -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug +impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug for EntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4431,7 +4471,7 @@ impl<'a, K: Borrow, Q: ?Sized> AsRef for KeyOrRef<'a, K, Q> { /// assert_eq!(map.get("c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { hash: u64, key: Option>, elem: Bucket<(K, V)>, @@ -4444,7 +4484,7 @@ where Q: Sync + ?Sized, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> @@ -4453,16 +4493,16 @@ where Q: Sync + ?Sized, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } -impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug +impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug for OccupiedEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntryRef") - .field("key", &self.key()) + .field("key", &self.key().borrow()) .field("value", &self.get()) .finish() } @@ -4498,13 +4538,13 @@ impl, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug /// } /// assert!(map["b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { +pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { hash: u64, key: KeyOrRef<'b, K, Q>, table: &'a mut HashMap, } -impl, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug +impl, Q: ?Sized + Debug, V, S, A: Allocator> Debug for VacantEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4536,14 +4576,14 @@ impl, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug /// } /// assert_eq!(map[&"a"], 100); /// ``` -pub struct OccupiedError<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedError<'a, K, V, S, A: Allocator = Global> { /// The entry in the map that was already occupied. pub entry: OccupiedEntry<'a, K, V, S, A>, /// The value which was not inserted, because the entry was already occupied. 
pub value: V, } -impl Debug for OccupiedError<'_, K, V, S, A> { +impl Debug for OccupiedError<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedError") .field("key", self.entry.key()) @@ -4553,9 +4593,7 @@ impl Debug for OccupiedError<'_, K, } } -impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display - for OccupiedError<'a, K, V, S, A> -{ +impl<'a, K: Debug, V: Debug, S, A: Allocator> fmt::Display for OccupiedError<'a, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, @@ -4567,7 +4605,7 @@ impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display } } -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap { +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a HashMap { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; @@ -4599,7 +4637,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap } } -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap { +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a mut HashMap { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; @@ -4636,7 +4674,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap IntoIterator for HashMap { +impl IntoIterator for HashMap { type Item = (K, V); type IntoIter = IntoIter; @@ -4731,7 +4769,7 @@ where } } -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4743,15 +4781,15 @@ impl Iterator for IntoIter { self.inner.size_hint() } } -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for IntoIter {} +impl FusedIterator for IntoIter {} -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } @@ -4837,7 +4875,7 @@ impl fmt::Debug for ValuesMut<'_, K, V> { } } -impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { +impl<'a, K, V, A: Allocator> Iterator for Drain<'a, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4849,26 +4887,26 @@ impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { self.inner.size_hint() } } -impl ExactSizeIterator for Drain<'_, K, V, A> { +impl ExactSizeIterator for Drain<'_, K, V, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl FusedIterator for Drain<'_, K, V, A> {} +impl FusedIterator for Drain<'_, K, V, A> {} impl fmt::Debug for Drain<'_, K, V, A> where K: fmt::Debug, V: fmt::Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } -impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> Entry<'a, K, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. /// /// # Examples @@ -5115,7 +5153,7 @@ impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { } } -impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { +impl<'a, K, V: Default, S, A: Allocator> Entry<'a, K, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. 
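`OccupiedError` above is the error half of `HashMap::try_insert`; with the relaxed `A: Allocator` bound it now also applies to non-`Clone` allocators. A usage sketch:

```rust
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<&str, i32> = HashMap::new();
    assert_eq!(*map.try_insert("a", 1).unwrap(), 1);

    // The error carries both the occupied entry and the rejected value,
    // so the caller can decide how to resolve the conflict.
    let err = map.try_insert("a", 100).unwrap_err();
    assert_eq!(err.entry.get(), &1);
    assert_eq!(err.value, 100);
}
```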
/// @@ -5148,7 +5186,7 @@ impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> OccupiedEntry<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -5183,7 +5221,6 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// // We delete the entry from the map. @@ -5191,12 +5228,12 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { /// } /// /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// // Now map hold none elements + /// assert!(map.is_empty()); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.table.remove(self.elem) } + unsafe { self.table.table.remove(self.elem).0 } } /// Gets a reference to the value in the entry. @@ -5319,15 +5356,14 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); /// /// if let Entry::Occupied(o) = map.entry("poneyland") { /// assert_eq!(o.remove(), 12); /// } /// /// assert_eq!(map.contains_key("poneyland"), false); - /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// // Now map hold none elements + /// assert!(map.is_empty()); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove(self) -> V { @@ -5505,7 +5541,7 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> VacantEntry<'a, K, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntry`. /// @@ -5567,7 +5603,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { let entry = table.insert_entry( self.hash, (self.key, value), - make_hasher::(&self.table.hash_builder), + make_hasher::<_, V, S>(&self.table.hash_builder), ); &mut entry.1 } @@ -5581,7 +5617,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { let elem = self.table.table.insert( self.hash, (self.key, value), - make_hasher::(&self.table.hash_builder), + make_hasher::<_, V, S>(&self.table.hash_builder), ); OccupiedEntry { hash: self.hash, @@ -5592,7 +5628,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntryRef. /// /// # Examples @@ -5682,10 +5718,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, /// Ensures a value is in the entry by inserting, if empty, the result of the default function. 
/// This method allows for generating key-derived values for insertion by providing the default - /// function a reference to the key that was moved during the `.entry_ref(key)` method call. - /// - /// The reference to the moved key is provided so that cloning or copying the key is - /// unnecessary, unlike with `.or_insert_with(|| ... )`. + /// function an access to the borrower form of the key. /// /// # Examples /// @@ -5737,7 +5770,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, K: Borrow, { match *self { - EntryRef::Occupied(ref entry) => entry.key(), + EntryRef::Occupied(ref entry) => entry.key().borrow(), EntryRef::Vacant(ref entry) => entry.key(), } } @@ -5833,8 +5866,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, #[cfg_attr(feature = "inline-more", inline)] pub fn and_replace_entry_with(self, f: F) -> Self where - F: FnOnce(&Q, V) -> Option, - K: Borrow, + F: FnOnce(&K, V) -> Option, { match self { EntryRef::Occupied(entry) => entry.replace_entry_with(f), @@ -5843,7 +5875,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, } } -impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -5876,7 +5908,7 @@ impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -5893,11 +5925,8 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// } /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn key(&self) -> &Q - where - K: Borrow, - { - unsafe { &self.elem.as_ref().0 }.borrow() + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } } /// Take the ownership of the key and value from the map. @@ -5914,7 +5943,6 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry_ref("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); /// /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") { /// // We delete the entry from the map. @@ -5923,11 +5951,11 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// /// assert_eq!(map.contains_key("poneyland"), false); /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// assert!(map.is_empty()); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.table.remove(self.elem) } + unsafe { self.table.table.remove(self.elem).0 } } /// Gets a reference to the value in the entry. 
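`OccupiedEntryRef::key` now returns the stored `&K` rather than the borrowed `&Q`, and `replace_entry_with` hands its closure `&K` as well. A sketch of the adjusted call sites:

```rust
use hashbrown::hash_map::{EntryRef, HashMap};

fn main() {
    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert("poneyland".to_string(), 12);

    if let EntryRef::Occupied(entry) = map.entry_ref("poneyland") {
        // `key()` is now `&String` (the stored `K`), not `&str`.
        let key: &String = entry.key();
        assert_eq!(key, "poneyland");

        // The replacement closure also sees `&K` rather than `&Q`.
        let _ = entry.replace_entry_with(|_key: &String, value| Some(value + 1));
    }
    assert_eq!(map["poneyland"], 13);
}
```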
@@ -6048,7 +6076,6 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// assert!(map.is_empty() && map.capacity() == 0); /// /// map.entry_ref("poneyland").or_insert(12); - /// let capacity_before_remove = map.capacity(); /// /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") { /// assert_eq!(o.remove(), 12); @@ -6056,7 +6083,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// /// assert_eq!(map.contains_key("poneyland"), false); /// // Now map hold none elements but capacity is equal to the old one - /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// assert!(map.is_empty()); /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove(self) -> V { @@ -6068,7 +6095,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// /// # Panics /// - /// Will panic if this OccupiedEntry was created through [`EntryRef::insert`]. + /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`]. /// /// # Examples /// @@ -6110,7 +6137,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// /// # Panics /// - /// Will panic if this OccupiedEntry was created through [`Entry::insert`]. + /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`]. /// /// # Examples /// @@ -6138,7 +6165,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, /// fn reclaim_memory(map: &mut HashMap, usize>, keys: &[Rc]) { /// for key in keys { /// if let EntryRef::Occupied(entry) = map.entry_ref(key.as_ref()) { - /// /// Replaces the entry's key with our version of it in `keys`. + /// // Replaces the entry's key with our version of it in `keys`. /// entry.replace_key(); /// } /// } @@ -6204,8 +6231,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, #[cfg_attr(feature = "inline-more", inline)] pub fn replace_entry_with(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A> where - F: FnOnce(&Q, V) -> Option, - K: Borrow, + F: FnOnce(&K, V) -> Option, { unsafe { let mut spare_key = None; @@ -6213,7 +6239,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, self.table .table .replace_bucket_with(self.elem.clone(), |(key, value)| { - if let Some(new_value) = f(key.borrow(), value) { + if let Some(new_value) = f(&key, value) { Some((key, new_value)) } else { spare_key = Some(KeyOrRef::Owned(key)); @@ -6234,7 +6260,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> VacantEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntryRef`. 
/// @@ -6305,7 +6331,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, let entry = table.insert_entry( self.hash, (self.key.into_owned(), value), - make_hasher::(&self.table.hash_builder), + make_hasher::<_, V, S>(&self.table.hash_builder), ); &mut entry.1 } @@ -6319,7 +6345,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, let elem = self.table.table.insert( self.hash, (self.key.into_owned(), value), - make_hasher::(&self.table.hash_builder), + make_hasher::<_, V, S>(&self.table.hash_builder), ); OccupiedEntryRef { hash: self.hash, @@ -6334,7 +6360,7 @@ impl FromIterator<(K, V)> for HashMap where K: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter>(iter: T) -> Self { @@ -6354,7 +6380,7 @@ impl Extend<(K, V)> for HashMap where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. @@ -6438,7 +6464,7 @@ where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. @@ -6455,17 +6481,17 @@ where /// map.insert(1, 100); /// /// let arr = [(1, 1), (2, 2)]; - /// let some_iter = arr.iter().map(|&(k, v)| (k, v)); + /// let some_iter = arr.iter().map(|(k, v)| (k, v)); /// map.extend(some_iter); /// // Replace values with existing keys with new values returned from the iterator. /// // So that the map.get(&1) doesn't return Some(&100). /// assert_eq!(map.get(&1), Some(&1)); /// /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)]; - /// map.extend(some_vec.iter().map(|&(k, v)| (k, v))); + /// map.extend(some_vec.iter().map(|(k, v)| (k, v))); /// /// let some_arr = [(5, 5), (6, 6)]; - /// map.extend(some_arr.iter().map(|&(k, v)| (k, v))); + /// map.extend(some_arr.iter().map(|(k, v)| (k, v))); /// /// // You can also extend from another HashMap /// let mut new_map = HashMap::new(); @@ -6503,7 +6529,7 @@ where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap`. /// Replace values with existing keys with new values returned from the iterator. 
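The doc hunks above drop the explicit `&(k, v)` destructuring because `Extend<(&K, &V)>` (for `Copy` key/value types) accepts plain reference pairs. Compressed into a runnable form:

```rust
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<i32, i32> = HashMap::new();
    map.insert(1, 100);

    // `Extend<(&K, &V)>` copies out of borrowed pairs, replacing values
    // for keys that already exist.
    let arr = [(1, 1), (2, 2)];
    map.extend(arr.iter().map(|(k, v)| (k, v)));
    assert_eq!(map.get(&1), Some(&1));

    // A `&HashMap` also works as the extension source.
    let mut other = HashMap::new();
    other.insert(3, 3);
    map.extend(&other);
    assert_eq!(map.len(), 3);
}
```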
@@ -6570,12 +6596,12 @@ fn assert_covariance() { fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { v } - fn into_iter_key<'new, A: Allocator + Clone>( + fn into_iter_key<'new, A: Allocator>( v: IntoIter<&'static str, u8, A>, ) -> IntoIter<&'new str, u8, A> { v } - fn into_iter_val<'new, A: Allocator + Clone>( + fn into_iter_val<'new, A: Allocator>( v: IntoIter, ) -> IntoIter { v @@ -6605,6 +6631,12 @@ mod test_map { use super::Entry::{Occupied, Vacant}; use super::EntryRef; use super::{HashMap, RawEntryMut}; + use alloc::string::{String, ToString}; + use alloc::sync::Arc; + use allocator_api2::alloc::{AllocError, Allocator, Global}; + use core::alloc::Layout; + use core::ptr::NonNull; + use core::sync::atomic::{AtomicI8, Ordering}; use rand::{rngs::SmallRng, Rng, SeedableRng}; use std::borrow::ToOwned; use std::cell::RefCell; @@ -6827,7 +6859,6 @@ mod test_map { } }); - #[allow(clippy::let_underscore_drop)] // kind-of a false positive for _ in half.by_ref() {} DROP_VECTOR.with(|v| { @@ -7155,10 +7186,10 @@ mod test_map { map.insert(1, 2); map.insert(3, 4); - let map_str = format!("{:?}", map); + let map_str = format!("{map:?}"); assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); - assert_eq!(format!("{:?}", empty), "{}"); + assert_eq!(format!("{empty:?}"), "{}"); } #[test] @@ -7474,7 +7505,7 @@ mod test_map { // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); } } @@ -7510,7 +7541,7 @@ mod test_map { // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); } } @@ -7559,6 +7590,7 @@ mod test_map { } #[test] + #[allow(clippy::needless_borrow)] fn test_extend_ref_kv_tuple() { use std::ops::AddAssign; let mut a = HashMap::new(); @@ -7580,7 +7612,7 @@ mod test_map { let vec: Vec<_> = (100..200).map(|i| (i, i)).collect(); a.extend(iter); a.extend(&vec); - a.extend(&create_arr::(200, 1)); + a.extend(create_arr::(200, 1)); assert_eq!(a.len(), 300); @@ -7981,7 +8013,7 @@ mod test_map { // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); } } @@ -8011,7 +8043,7 @@ mod test_map { // Test for #19292 fn check(m: &HashMap) { for k in m.keys() { - assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + assert!(m.contains_key(k), "{k} is in keys() but not in the map?"); } } @@ -8049,10 +8081,10 @@ mod test_map { } #[test] - fn test_drain_filter() { + fn test_extract_if() { { let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); - let drained = map.drain_filter(|&k, _| k % 2 == 0); + let drained = map.extract_if(|&k, _| k % 2 == 0); let mut out = drained.collect::>(); out.sort_unstable(); assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out); @@ -8060,7 +8092,7 @@ mod test_map { } { let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); - drop(map.drain_filter(|&k, _| k % 2 == 0)); + map.extract_if(|&k, _| k % 2 == 0).for_each(drop); assert_eq!(map.len(), 4); } } @@ -8070,27 +8102,32 @@ mod test_map { fn test_try_reserve() { use crate::TryReserveError::{AllocError, CapacityOverflow}; - const MAX_USIZE: usize = usize::MAX; + const MAX_ISIZE: usize = isize::MAX as usize; let mut 
empty_bytes: HashMap = HashMap::new(); - if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + if let Err(CapacityOverflow) = empty_bytes.try_reserve(usize::MAX) { } else { panic!("usize::MAX should trigger an overflow!"); } - if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) { + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_ISIZE) { + } else { + panic!("isize::MAX should trigger an overflow!"); + } + + if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_ISIZE / 5) { } else { // This may succeed if there is enough free memory. Attempt to // allocate a few more hashmaps to ensure the allocation will fail. let mut empty_bytes2: HashMap = HashMap::new(); - let _ = empty_bytes2.try_reserve(MAX_USIZE / 16); + let _ = empty_bytes2.try_reserve(MAX_ISIZE / 5); let mut empty_bytes3: HashMap = HashMap::new(); - let _ = empty_bytes3.try_reserve(MAX_USIZE / 16); + let _ = empty_bytes3.try_reserve(MAX_ISIZE / 5); let mut empty_bytes4: HashMap = HashMap::new(); - if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_USIZE / 16) { + if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_ISIZE / 5) { } else { - panic!("usize::MAX / 8 should trigger an OOM!"); + panic!("isize::MAX / 5 should trigger an OOM!"); } } } @@ -8104,7 +8141,7 @@ mod test_map { let mut map: HashMap<_, _> = xs.iter().copied().collect(); let compute_hash = |map: &HashMap, k: i32| -> u64 { - super::make_insert_hash::(map.hasher(), &k) + super::make_hash::(map.hasher(), &k) }; // Existing key (insert) @@ -8266,21 +8303,21 @@ mod test_map { loop { // occasionally remove some elements if i < n && rng.gen_bool(0.1) { - let hash_value = super::make_insert_hash(&hash_builder, &i); + let hash_value = super::make_hash(&hash_builder, &i); unsafe { let e = map.table.find(hash_value, |q| q.0.eq(&i)); if let Some(e) = e { it.reflect_remove(&e); - let t = map.table.remove(e); + let t = map.table.remove(e).0; removed.push(t); left -= 1; } else { - assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed); + assert!(removed.contains(&(i, 2 * i)), "{i} not in {removed:?}"); let e = map.table.insert( hash_value, (i, 2 * i), - super::make_hasher::(&hash_builder), + super::make_hasher::<_, usize, _>(&hash_builder), ); it.reflect_insert(&e); if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) { @@ -8405,4 +8442,441 @@ mod test_map { map2.clone_from(&map1); } + + #[test] + #[should_panic = "panic in clone"] + fn test_clone_from_memory_leaks() { + use ::alloc::vec::Vec; + + struct CheckedClone { + panic_in_clone: bool, + need_drop: Vec, + } + impl Clone for CheckedClone { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + need_drop: self.need_drop.clone(), + } + } + } + let mut map1 = HashMap::new(); + map1.insert( + 1, + CheckedClone { + panic_in_clone: false, + need_drop: vec![0, 1, 2], + }, + ); + map1.insert( + 2, + CheckedClone { + panic_in_clone: false, + need_drop: vec![3, 4, 5], + }, + ); + map1.insert( + 3, + CheckedClone { + panic_in_clone: true, + need_drop: vec![6, 7, 8], + }, + ); + let _map2 = map1.clone(); + } + + struct MyAllocInner { + drop_count: Arc, + } + + #[derive(Clone)] + struct MyAlloc { + _inner: Arc, + } + + impl MyAlloc { + fn new(drop_count: Arc) -> Self { + MyAlloc { + _inner: Arc::new(MyAllocInner { drop_count }), + } + } + } + + impl Drop for MyAllocInner { + fn drop(&mut self) { + println!("MyAlloc freed."); + self.drop_count.fetch_sub(1, 
Ordering::SeqCst); + } + } + + unsafe impl Allocator for MyAlloc { + fn allocate(&self, layout: Layout) -> std::result::Result, AllocError> { + let g = Global; + g.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + let g = Global; + g.deallocate(ptr, layout) + } + } + + #[test] + fn test_hashmap_into_iter_bug() { + let dropped: Arc = Arc::new(AtomicI8::new(1)); + + { + let mut map = HashMap::with_capacity_in(10, MyAlloc::new(dropped.clone())); + for i in 0..10 { + map.entry(i).or_insert_with(|| "i".to_string()); + } + + for (k, v) in map { + println!("{}, {}", k, v); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + #[derive(Debug)] + struct CheckedCloneDrop { + panic_in_clone: bool, + panic_in_drop: bool, + dropped: bool, + data: T, + } + + impl CheckedCloneDrop { + fn new(panic_in_clone: bool, panic_in_drop: bool, data: T) -> Self { + CheckedCloneDrop { + panic_in_clone, + panic_in_drop, + dropped: false, + data, + } + } + } + + impl Clone for CheckedCloneDrop { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + panic_in_drop: self.panic_in_drop, + dropped: self.dropped, + data: self.data.clone(), + } + } + } + + impl Drop for CheckedCloneDrop { + fn drop(&mut self) { + if self.panic_in_drop { + self.dropped = true; + panic!("panic in drop"); + } + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + + /// Return hashmap with predefined distribution of elements. + /// All elements will be located in the same order as elements + /// returned by iterator. + /// + /// This function does not panic, but returns an error as a `String` + /// to distinguish between a test panic and an error in the input data. + fn get_test_map( + iter: I, + mut fun: impl FnMut(u64) -> T, + alloc: A, + ) -> Result, DefaultHashBuilder, A>, String> + where + I: Iterator + Clone + ExactSizeIterator, + A: Allocator, + T: PartialEq + core::fmt::Debug, + { + use crate::scopeguard::guard; + + let mut map: HashMap, _, A> = + HashMap::with_capacity_in(iter.size_hint().0, alloc); + { + let mut guard = guard(&mut map, |map| { + for (_, value) in map.iter_mut() { + value.panic_in_drop = false + } + }); + + let mut count = 0; + // Hash and Key must be equal to each other for controlling the elements placement. 
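On stable toolchains the `A: Allocator` bound now comes from `allocator-api2` (see the `raw/alloc.rs` hunks below), and the `MyAlloc` test helper above is essentially this pattern. A standalone sketch assuming hashbrown's default features; `PassThroughAlloc` is illustrative:

```rust
use allocator_api2::alloc::{AllocError, Allocator, Global};
use core::alloc::Layout;
use core::ptr::NonNull;
use hashbrown::HashMap;

// Delegates all work to `Global`; a real allocator could add counting or
// logging around these two calls, as the `MyAlloc` test helper does.
#[derive(Clone, Default)]
struct PassThroughAlloc;

unsafe impl Allocator for PassThroughAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        Global.allocate(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        Global.deallocate(ptr, layout);
    }
}

fn main() {
    // `with_capacity_in` threads the allocator through the whole table.
    let mut map = HashMap::with_capacity_in(8, PassThroughAlloc);
    map.insert(1, "a");
    assert_eq!(map.get(&1), Some(&"a"));
}
```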
+ for (panic_in_clone, panic_in_drop) in iter.clone() { + if core::mem::needs_drop::() && panic_in_drop { + return Err(String::from( + "panic_in_drop can be set with a type that doesn't need to be dropped", + )); + } + guard.table.insert( + count, + ( + count, + CheckedCloneDrop::new(panic_in_clone, panic_in_drop, fun(count)), + ), + |(k, _)| *k, + ); + count += 1; + } + + // Let's check that all elements are located as we wanted + let mut check_count = 0; + for ((key, value), (panic_in_clone, panic_in_drop)) in guard.iter().zip(iter) { + if *key != check_count { + return Err(format!( + "key != check_count,\nkey: `{}`,\ncheck_count: `{}`", + key, check_count + )); + } + if value.dropped + || value.panic_in_clone != panic_in_clone + || value.panic_in_drop != panic_in_drop + || value.data != fun(check_count) + { + return Err(format!( + "Value is not equal to expected,\nvalue: `{:?}`,\nexpected: \ + `CheckedCloneDrop {{ panic_in_clone: {}, panic_in_drop: {}, dropped: {}, data: {:?} }}`", + value, panic_in_clone, panic_in_drop, false, fun(check_count) + )); + } + check_count += 1; + } + + if guard.len() != check_count as usize { + return Err(format!( + "map.len() != check_count,\nmap.len(): `{}`,\ncheck_count: `{}`", + guard.len(), + check_count + )); + } + + if count != check_count { + return Err(format!( + "count != check_count,\ncount: `{}`,\ncheck_count: `{}`", + count, check_count + )); + } + core::mem::forget(guard); + } + Ok(map) + } + + const DISARMED: bool = false; + const ARMED: bool = true; + + const ARMED_FLAGS: [bool; 8] = [ + DISARMED, DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + + const DISARMED_FLAGS: [bool; 8] = [ + DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + + #[test] + #[should_panic = "panic in clone"] + fn test_clone_memory_leaks_and_double_drop_one() { + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let map: HashMap>, DefaultHashBuilder, MyAlloc> = + match get_test_map( + ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + // Clone should normally clone a few elements, and then (when the + // clone function panics), deallocate both its own memory, memory + // of `dropped: Arc` and the memory of already cloned + // elements (Vec memory inside CheckedCloneDrop). + let _map2 = map.clone(); + } + } + + #[test] + #[should_panic = "panic in drop"] + fn test_clone_memory_leaks_and_double_drop_two() { + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let map: HashMap, DefaultHashBuilder, _> = match get_test_map( + DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| n, + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + let mut map2 = match get_test_map( + DISARMED_FLAGS.into_iter().zip(ARMED_FLAGS), + |n| n, + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + // The `clone_from` should try to drop the elements of `map2` without + // double drop and leaking the allocator. Elements that have not been + // dropped leak their memory. + map2.clone_from(&map); + } + } + + /// We check that we have a working table if the clone operation from another + /// thread ended in a panic (when buckets of maps are equal to each other). 
+ #[test] + fn test_catch_panic_clone_from_when_len_is_equal() { + use std::thread; + + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let mut map = match get_test_map( + DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + thread::scope(|s| { + let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { + let scope_map = + match get_test_map(ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), |n| vec![n * 2], MyAlloc::new(dropped.clone())) { + Ok(map) => map, + Err(msg) => return msg, + }; + if map.table.buckets() != scope_map.table.buckets() { + return format!( + "map.table.buckets() != scope_map.table.buckets(),\nleft: `{}`,\nright: `{}`", + map.table.buckets(), scope_map.table.buckets() + ); + } + map.clone_from(&scope_map); + "We must fail the cloning!!!".to_owned() + }); + if let Ok(msg) = result.join() { + panic!("{msg}") + } + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. + assert_eq!(map.len(), 0); + assert_eq!(map.iter().count(), 0); + assert_eq!(unsafe { map.table.iter().count() }, 0); + assert_eq!(unsafe { map.table.iter().iter.count() }, 0); + + for idx in 0..map.table.buckets() { + let idx = idx as u64; + assert!( + map.table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + /// We check that we have a working table if the clone operation from another + /// thread ended in a panic (when buckets of maps are not equal to each other). + #[test] + fn test_catch_panic_clone_from_when_len_is_not_equal() { + use std::thread; + + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let mut map = match get_test_map( + [DISARMED].into_iter().zip([DISARMED]), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + thread::scope(|s| { + let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { + let scope_map = match get_test_map( + ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n * 2], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => return msg, + }; + if map.table.buckets() == scope_map.table.buckets() { + return format!( + "map.table.buckets() == scope_map.table.buckets(): `{}`", + map.table.buckets() + ); + } + map.clone_from(&scope_map); + "We must fail the cloning!!!".to_owned() + }); + if let Ok(msg) = result.join() { + panic!("{msg}") + } + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. 
+ assert_eq!(map.len(), 0); + assert_eq!(map.iter().count(), 0); + assert_eq!(unsafe { map.table.iter().count() }, 0); + assert_eq!(unsafe { map.table.iter().iter.count() }, 0); + + for idx in 0..map.table.buckets() { + let idx = idx as u64; + assert!( + map.table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } } diff --git a/vendor/hashbrown/src/raw/alloc.rs b/vendor/hashbrown/src/raw/alloc.rs index ba09ea9..15299e7 100644 --- a/vendor/hashbrown/src/raw/alloc.rs +++ b/vendor/hashbrown/src/raw/alloc.rs @@ -1,5 +1,9 @@ pub(crate) use self::inner::{do_alloc, Allocator, Global}; +// Nightly-case. +// Use unstable `allocator_api` feature. +// This is compatible with `allocator-api2` which can be enabled or not. +// This is used when building for `std`. #[cfg(feature = "nightly")] mod inner { use crate::alloc::alloc::Layout; @@ -7,28 +11,44 @@ mod inner { use core::ptr::NonNull; #[allow(clippy::map_err_ignore)] - pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { match alloc.allocate(layout) { Ok(ptr) => Ok(ptr.as_non_null_ptr()), Err(_) => Err(()), } } +} - #[cfg(feature = "bumpalo")] - unsafe impl Allocator for crate::BumpWrapper<'_> { - #[inline] - fn allocate(&self, layout: Layout) -> Result, core::alloc::AllocError> { - match self.0.try_alloc_layout(layout) { - Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), - Err(_) => Err(core::alloc::AllocError), - } +// Basic non-nightly case. +// This uses `allocator-api2` enabled by default. +// If any crate enables "nightly" in `allocator-api2`, +// this will be equivalent to the nightly case, +// since `allocator_api2::alloc::Allocator` would be re-export of +// `core::alloc::Allocator`. +#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))] +mod inner { + use crate::alloc::alloc::Layout; + pub use allocator_api2::alloc::{Allocator, Global}; + use core::ptr::NonNull; + + #[allow(clippy::map_err_ignore)] + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + match alloc.allocate(layout) { + Ok(ptr) => Ok(ptr.cast()), + Err(_) => Err(()), } - #[inline] - unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} } } -#[cfg(not(feature = "nightly"))] +// No-defaults case. +// When building with default-features turned off and +// neither `nightly` nor `allocator-api2` is enabled, +// this will be used. +// Making it impossible to use any custom allocator with collections defined +// in this crate. +// Any crate in build-tree can enable `allocator-api2`, +// or `nightly` without disturbing users that don't want to use it. 
+#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))] mod inner { use crate::alloc::alloc::{alloc, dealloc, Layout}; use core::ptr::NonNull; @@ -41,6 +61,7 @@ mod inner { #[derive(Copy, Clone)] pub struct Global; + unsafe impl Allocator for Global { #[inline] fn allocate(&self, layout: Layout) -> Result, ()> { @@ -51,6 +72,7 @@ mod inner { dealloc(ptr.as_ptr(), layout); } } + impl Default for Global { #[inline] fn default() -> Self { @@ -58,16 +80,7 @@ mod inner { } } - pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + pub(crate) fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { alloc.allocate(layout) } - - #[cfg(feature = "bumpalo")] - unsafe impl Allocator for crate::BumpWrapper<'_> { - #[allow(clippy::map_err_ignore)] - fn allocate(&self, layout: Layout) -> Result, ()> { - self.0.try_alloc_layout(layout).map_err(|_| ()) - } - unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} - } } diff --git a/vendor/hashbrown/src/raw/bitmask.rs b/vendor/hashbrown/src/raw/bitmask.rs index 7d4f9fc..6576b3c 100644 --- a/vendor/hashbrown/src/raw/bitmask.rs +++ b/vendor/hashbrown/src/raw/bitmask.rs @@ -1,6 +1,6 @@ -use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE}; -#[cfg(feature = "nightly")] -use core::intrinsics; +use super::imp::{ + BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE, +}; /// A bit mask which contains the result of a `Match` operation on a `Group` and /// allows iterating through them. @@ -8,75 +8,55 @@ use core::intrinsics; /// The bit mask is arranged so that low-order bits represent lower memory /// addresses for group match results. /// -/// For implementation reasons, the bits in the set may be sparsely packed, so -/// that there is only one bit-per-byte used (the high bit, 7). If this is the +/// For implementation reasons, the bits in the set may be sparsely packed with +/// groups of 8 bits representing one element. If any of these bits are non-zero +/// then this element is considered to true in the mask. If this is the /// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be /// performed on counts/indices to normalize this difference. `BITMASK_MASK` is /// similarly a mask of all the actually-used bits. +/// +/// To iterate over a bit mask, it must be converted to a form where only 1 bit +/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the +/// mask bits. #[derive(Copy, Clone)] -pub struct BitMask(pub BitMaskWord); +pub(crate) struct BitMask(pub(crate) BitMaskWord); #[allow(clippy::use_self)] impl BitMask { /// Returns a new `BitMask` with all bits inverted. #[inline] #[must_use] - pub fn invert(self) -> Self { + #[allow(dead_code)] + pub(crate) fn invert(self) -> Self { BitMask(self.0 ^ BITMASK_MASK) } - /// Flip the bit in the mask for the entry at the given index. - /// - /// Returns the bit's previous state. - #[inline] - #[allow(clippy::cast_ptr_alignment)] - #[cfg(feature = "raw")] - pub unsafe fn flip(&mut self, index: usize) -> bool { - // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. - let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); - self.0 ^= mask; - // The bit was set if the bit is now 0. - self.0 & mask == 0 - } - /// Returns a new `BitMask` with the lowest bit removed. #[inline] #[must_use] - pub fn remove_lowest_bit(self) -> Self { + fn remove_lowest_bit(self) -> Self { BitMask(self.0 & (self.0 - 1)) } + /// Returns whether the `BitMask` has at least one set bit. 
#[inline] - pub fn any_bit_set(self) -> bool { + pub(crate) fn any_bit_set(self) -> bool { self.0 != 0 } /// Returns the first set bit in the `BitMask`, if there is one. #[inline] - pub fn lowest_set_bit(self) -> Option { - if self.0 == 0 { - None + pub(crate) fn lowest_set_bit(self) -> Option { + if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) { + Some(Self::nonzero_trailing_zeros(nonzero)) } else { - Some(unsafe { self.lowest_set_bit_nonzero() }) + None } } - /// Returns the first set bit in the `BitMask`, if there is one. The - /// bitmask must not be empty. - #[inline] - #[cfg(feature = "nightly")] - pub unsafe fn lowest_set_bit_nonzero(self) -> usize { - intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE - } - #[inline] - #[cfg(not(feature = "nightly"))] - pub unsafe fn lowest_set_bit_nonzero(self) -> usize { - self.trailing_zeros() - } - /// Returns the number of trailing zeroes in the `BitMask`. #[inline] - pub fn trailing_zeros(self) -> usize { + pub(crate) fn trailing_zeros(self) -> usize { // ARM doesn't have a trailing_zeroes instruction, and instead uses // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM // versions (pre-ARMv7) don't have RBIT and need to emulate it @@ -89,9 +69,21 @@ impl BitMask { } } + /// Same as above but takes a `NonZeroBitMaskWord`. + #[inline] + fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize { + if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + // SAFETY: A byte-swapped non-zero value is still non-zero. + let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) }; + swapped.leading_zeros() as usize / BITMASK_STRIDE + } else { + nonzero.trailing_zeros() as usize / BITMASK_STRIDE + } + } + /// Returns the number of leading zeroes in the `BitMask`. #[inline] - pub fn leading_zeros(self) -> usize { + pub(crate) fn leading_zeros(self) -> usize { self.0.leading_zeros() as usize / BITMASK_STRIDE } } @@ -102,13 +94,32 @@ impl IntoIterator for BitMask { #[inline] fn into_iter(self) -> BitMaskIter { - BitMaskIter(self) + // A BitMask only requires each element (group of bits) to be non-zero. + // However for iteration we need each element to only contain 1 bit. + BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK)) } } /// Iterator over the contents of a `BitMask`, returning the indices of set /// bits. -pub struct BitMaskIter(BitMask); +#[derive(Copy, Clone)] +pub(crate) struct BitMaskIter(pub(crate) BitMask); + +impl BitMaskIter { + /// Flip the bit in the mask for the entry at the given index. + /// + /// Returns the bit's previous state. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + #[cfg(feature = "raw")] + pub(crate) unsafe fn flip(&mut self, index: usize) -> bool { + // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. + let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); + self.0 .0 ^= mask; + // The bit was set if the bit is now 0. + self.0 .0 & mask == 0 + } +} impl Iterator for BitMaskIter { type Item = usize; diff --git a/vendor/hashbrown/src/raw/generic.rs b/vendor/hashbrown/src/raw/generic.rs index b4d31e6..c668b06 100644 --- a/vendor/hashbrown/src/raw/generic.rs +++ b/vendor/hashbrown/src/raw/generic.rs @@ -5,26 +5,29 @@ use core::{mem, ptr}; // Use the native word size as the group size. Using a 64-bit group size on // a 32-bit architecture will just end up being more expensive because // shifts and multiplies will need to be emulated. 
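The iteration scheme above reduces to "take the index of the lowest set bit, then clear it with `x & (x - 1)`". A standalone sketch with the generic implementation's stride of 8 bits per element; `set_indices` is a hypothetical mirror of `BitMaskIter`, not crate API:

```rust
// Yields element indices from a mask where each matching byte has its
// high bit set (the form produced by applying `BITMASK_ITER_MASK`).
fn set_indices(mut mask: u64) -> Vec<usize> {
    const STRIDE: usize = 8; // bits per element in the generic implementation
    let mut out = Vec::new();
    while mask != 0 {
        // Index of the lowest set bit, normalized to an element index.
        out.push(mask.trailing_zeros() as usize / STRIDE);
        // `remove_lowest_bit`: clear exactly the bit we just reported.
        mask &= mask - 1;
    }
    out
}

fn main() {
    // High bits set for bytes 0, 1, 4 and 7 of the group.
    assert_eq!(set_indices(0x8000_0080_0000_8080), vec![0, 1, 4, 7]);
}
```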
-#[cfg(any(
-    target_pointer_width = "64",
-    target_arch = "aarch64",
-    target_arch = "x86_64",
-    target_arch = "wasm32",
-))]
-type GroupWord = u64;
-#[cfg(all(
-    target_pointer_width = "32",
-    not(target_arch = "aarch64"),
-    not(target_arch = "x86_64"),
-    not(target_arch = "wasm32"),
-))]
-type GroupWord = u32;
-pub type BitMaskWord = GroupWord;
-pub const BITMASK_STRIDE: usize = 8;
+cfg_if! {
+    if #[cfg(any(
+        target_pointer_width = "64",
+        target_arch = "aarch64",
+        target_arch = "x86_64",
+        target_arch = "wasm32",
+    ))] {
+        type GroupWord = u64;
+        type NonZeroGroupWord = core::num::NonZeroU64;
+    } else {
+        type GroupWord = u32;
+        type NonZeroGroupWord = core::num::NonZeroU32;
+    }
+}
+
+pub(crate) type BitMaskWord = GroupWord;
+pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord;
+pub(crate) const BITMASK_STRIDE: usize = 8;
 // We only care about the highest bit of each byte for the mask.
 #[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
-pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;

 /// Helper function to replicate a byte across a `GroupWord`.
 #[inline]
@@ -37,7 +40,7 @@ fn repeat(byte: u8) -> GroupWord {
 ///
 /// This implementation uses a word-sized integer.
 #[derive(Copy, Clone)]
-pub struct Group(GroupWord);
+pub(crate) struct Group(GroupWord);

 // We perform all operations in the native endianness, and convert to
 // little-endian just before creating a BitMask. The can potentially
@@ -46,14 +49,14 @@ pub struct Group(GroupWord);
 #[allow(clippy::use_self)]
 impl Group {
     /// Number of bytes in the group.
-    pub const WIDTH: usize = mem::size_of::<Self>();
+    pub(crate) const WIDTH: usize = mem::size_of::<Self>();

     /// Returns a full group of empty bytes, suitable for use as the initial
     /// value for an empty hash table.
     ///
     /// This is guaranteed to be aligned to the group size.
     #[inline]
-    pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+    pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
         #[repr(C)]
         struct AlignedBytes {
             _align: [Group; 0],
@@ -69,7 +72,7 @@ impl Group {
     /// Loads a group of bytes starting at the given address.
     #[inline]
     #[allow(clippy::cast_ptr_alignment)] // unaligned load
-    pub unsafe fn load(ptr: *const u8) -> Self {
+    pub(crate) unsafe fn load(ptr: *const u8) -> Self {
         Group(ptr::read_unaligned(ptr.cast()))
     }

@@ -77,7 +80,7 @@ impl Group {
     /// aligned to `mem::align_of::<Self>()`.
     #[inline]
     #[allow(clippy::cast_ptr_alignment)]
-    pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+    pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
         // FIXME: use align_offset once it stabilizes
         debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
         Group(ptr::read(ptr.cast()))
@@ -87,7 +90,7 @@ impl Group {
     /// aligned to `mem::align_of::<Self>()`.
     #[inline]
     #[allow(clippy::cast_ptr_alignment)]
-    pub unsafe fn store_aligned(self, ptr: *mut u8) {
+    pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
         // FIXME: use align_offset once it stabilizes
         debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
         ptr::write(ptr.cast(), self.0);
@@ -104,7 +107,7 @@ impl Group {
     /// - This only happens if there is at least 1 true match.
     /// - The chance of this happening is very low (< 1% chance per byte).
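The false-positive caveat documented above comes from the word-level byte search that `match_byte` performs (the next hunk). This standalone sketch reproduces the underlying zero-byte trick; the constants and helpers are local to the example, not part of the patch.

```rust
// XOR makes matching bytes zero, then `(x - 0x01...) & !x & 0x80...`
// sets the high bit of every zero byte. As documented above, the borrow
// can leak into the byte following a real match, giving a false positive.

fn repeat(byte: u8) -> u64 {
    u64::from_ne_bytes([byte; 8])
}

fn match_byte_mask(group: u64, byte: u8) -> u64 {
    let cmp = group ^ repeat(byte);
    cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
}

fn main() {
    let group = u64::from_ne_bytes([7, 3, 7, 0, 9, 7, 1, 2]);
    let mask = match_byte_mask(group, 7);
    // Bytes 0, 2 and 5 hold the value 7, so their high bits are set.
    assert_eq!(mask & 0x80, 0x80);
    assert_eq!((mask >> 16) & 0x80, 0x80);
    assert_eq!((mask >> 40) & 0x80, 0x80);
}
```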
     #[inline]
-    pub fn match_byte(self, byte: u8) -> BitMask {
+    pub(crate) fn match_byte(self, byte: u8) -> BitMask {
         // This algorithm is derived from
         // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
         let cmp = self.0 ^ repeat(byte);
@@ -114,7 +117,7 @@ impl Group {
     /// Returns a `BitMask` indicating all bytes in the group which are
     /// `EMPTY`.
     #[inline]
-    pub fn match_empty(self) -> BitMask {
+    pub(crate) fn match_empty(self) -> BitMask {
         // If the high bit is set, then the byte must be either:
         // 1111_1111 (EMPTY) or 1000_0000 (DELETED).
         // So we can just check if the top two bits are 1 by ANDing them.
@@ -124,14 +127,14 @@ impl Group {
     /// Returns a `BitMask` indicating all bytes in the group which are
     /// `EMPTY` or `DELETED`.
     #[inline]
-    pub fn match_empty_or_deleted(self) -> BitMask {
+    pub(crate) fn match_empty_or_deleted(self) -> BitMask {
         // A byte is EMPTY or DELETED iff the high bit is set
         BitMask((self.0 & repeat(0x80)).to_le())
     }

     /// Returns a `BitMask` indicating all bytes in the group which are full.
     #[inline]
-    pub fn match_full(self) -> BitMask {
+    pub(crate) fn match_full(self) -> BitMask {
         self.match_empty_or_deleted().invert()
     }

@@ -140,7 +143,7 @@ impl Group {
     /// - `DELETED => EMPTY`
     /// - `FULL => DELETED`
     #[inline]
-    pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+    pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
         // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
         // and high_bit = 0 (FULL) to 1000_0000
         //
diff --git a/vendor/hashbrown/src/raw/mod.rs b/vendor/hashbrown/src/raw/mod.rs
index 211b818..25c5d1c 100644
--- a/vendor/hashbrown/src/raw/mod.rs
+++ b/vendor/hashbrown/src/raw/mod.rs
@@ -4,7 +4,6 @@ use crate::TryReserveError;
 use core::iter::FusedIterator;
 use core::marker::PhantomData;
 use core::mem;
-use core::mem::ManuallyDrop;
 use core::mem::MaybeUninit;
 use core::ptr::NonNull;
 use core::{hint, ptr};
@@ -21,12 +20,21 @@ cfg_if! {
     if #[cfg(all(
         target_feature = "sse2",
         any(target_arch = "x86", target_arch = "x86_64"),
-        not(miri)
+        not(miri),
     ))] {
         mod sse2;
         use sse2 as imp;
+    } else if #[cfg(all(
+        target_arch = "aarch64",
+        target_feature = "neon",
+        // NEON intrinsics are currently broken on big-endian targets.
+        // See https://github.com/rust-lang/stdarch/issues/1484.
+        target_endian = "little",
+        not(miri),
+    ))] {
+        mod neon;
+        use neon as imp;
     } else {
-        #[path = "generic.rs"]
         mod generic;
         use generic as imp;
     }
@@ -37,36 +45,26 @@ pub(crate) use self::alloc::{do_alloc, Allocator, Global};

 mod bitmask;

-use self::bitmask::{BitMask, BitMaskIter};
+use self::bitmask::BitMaskIter;
 use self::imp::Group;

 // Branch prediction hint. This is currently only available on nightly but it
 // consistently improves performance by 10-15%.
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as likely;
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as unlikely;
 #[cfg(feature = "nightly")]
 use core::intrinsics::{likely, unlikely};

-// On stable we can use #[cold] to get a equivalent effect: this attributes
-// suggests that the function is unlikely to be called
-#[cfg(not(feature = "nightly"))]
-#[inline]
-#[cold]
-fn cold() {}
-
-#[cfg(not(feature = "nightly"))]
-#[inline]
-fn likely(b: bool) -> bool {
-    if !b {
-        cold();
-    }
-    b
-}
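For reference, the `#[cold]`-based stable fallback being removed here can be reproduced standalone. The sketch below shows the technique the old code used to hint the optimizer, while the new code simply aliases `likely`/`unlikely` to `core::convert::identity` (a no-op).

```rust
// Standalone sketch of the removed stable-Rust branch hint: calling a
// #[cold] function on the unexpected path nudges the optimizer toward
// treating that branch as unlikely.

#[cold]
#[inline]
fn cold() {}

#[inline]
fn likely(b: bool) -> bool {
    if !b {
        cold();
    }
    b
}

fn main() {
    let mut hits = 0u32;
    for i in 0..1000 {
        // The hint does not change behavior, only code layout.
        if likely(i % 997 != 0) {
            hits += 1;
        }
    }
    assert_eq!(hits, 998);
}
```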
+// Use strict provenance functions if available.
+#[cfg(feature = "nightly")]
+use core::ptr::invalid_mut;
+// Implement it with a cast otherwise.
 #[cfg(not(feature = "nightly"))]
-#[inline]
-fn unlikely(b: bool) -> bool {
-    if b {
-        cold();
-    }
-    b
+#[inline(always)]
+fn invalid_mut<T>(addr: usize) -> *mut T {
+    addr as *mut T
 }

 #[inline]
@@ -101,6 +99,13 @@ impl Fallibility {
     }
 }

+trait SizedTypeProperties: Sized {
+    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
+    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
+}
+
+impl<T> SizedTypeProperties for T {}
+
 /// Control byte value for an empty bucket.
 const EMPTY: u8 = 0b1111_1111;

@@ -134,6 +139,13 @@ fn h1(hash: u64) -> usize {
     hash as usize
 }

+// Constant for h2 function that grabing the top 7 bits of the hash.
+const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
+    mem::size_of::<usize>()
+} else {
+    mem::size_of::<u64>()
+};
+
 /// Secondary hash function, saved in the low 7 bits of the control byte.
 #[inline]
 #[allow(clippy::cast_possible_truncation)]
@@ -141,8 +153,8 @@ fn h2(hash: u64) -> u8 {
     // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
     // value, some hash functions (such as FxHash) produce a usize result
     // instead, which means that the top 32 bits are 0 on 32-bit platforms.
-    let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
-    let top7 = hash >> (hash_len * 8 - 7);
+    // So we use MIN_HASH_LEN constant to handle this.
+    let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
     (top7 & 0x7f) as u8 // truncation
 }

@@ -230,11 +242,15 @@ struct TableLayout {

 impl TableLayout {
     #[inline]
-    fn new() -> Self {
+    const fn new<T>() -> Self {
         let layout = Layout::new::<T>();
         Self {
             size: layout.size(),
-            ctrl_align: usize::max(layout.align(), Group::WIDTH),
+            ctrl_align: if layout.align() > Group::WIDTH {
+                layout.align()
+            } else {
+                Group::WIDTH
+            },
         }
     }

@@ -248,6 +264,12 @@ impl TableLayout {
             size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
         let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

+        // We need an additional check to ensure that the allocation doesn't
+        // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
+        if len > isize::MAX as usize - (ctrl_align - 1) {
+            return None;
+        }
+
         Some((
             unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
             ctrl_offset,
@@ -255,14 +277,9 @@ impl TableLayout {
     }
 }

-/// Returns a Layout which describes the allocation required for a hash table,
-/// and the offset of the control bytes in the allocation.
-/// (the offset is also one past last element of buckets)
-///
-/// Returns `None` if an overflow occurs.
-#[cfg_attr(feature = "inline-more", inline)]
-fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
-    TableLayout::new::<T>().calculate_layout_for(buckets)
+/// A reference to an empty bucket into which an can be inserted.
+pub struct InsertSlot {
+    index: usize,
 }

 /// A reference to a hash table bucket containing a `T`.
@@ -290,11 +307,79 @@ impl<T> Clone for Bucket<T> {
     }
 }

 impl<T> Bucket<T> {
+    /// Creates a [`Bucket`] that contain pointer to the data.
+    /// The pointer calculation is performed by calculating the
+    /// offset from given `base` pointer (convenience for
+    /// `base.as_ptr().sub(index)`).
+    ///
+    /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
+    /// offset of `3 * size_of::<T>()` bytes.
+ /// + /// If the `T` is a ZST, then we instead track the index of the element + /// in the table so that `erase` works properly (return + /// `NonNull::new_unchecked((index + 1) as *mut T)`) + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety + /// rules of [`NonNull::new_unchecked`] function. + /// + /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method + /// and [`NonNull::new_unchecked`] function, as well as for the correct + /// logic of the work of this crate, the following rules are necessary and + /// sufficient: + /// + /// * the `base` pointer must not be `dangling` and must points to the + /// end of the first `value element` from the `data part` of the table, i.e. + /// must be the pointer that returned by [`RawTable::data_end`] or by + /// [`RawTableInner::data_end`]; + /// + /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` + /// must be no greater than the number returned by the function + /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the + /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e. + /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` + /// must be no greater than the number returned by the function + /// [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// [`Bucket`]: crate::raw::Bucket + /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 + /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked + /// [`RawTable::data_end`]: crate::raw::RawTable::data_end + /// [`RawTableInner::data_end`]: RawTableInner::data_end + /// [`RawTable::buckets`]: crate::raw::RawTable::buckets + /// [`RawTableInner::buckets`]: RawTableInner::buckets #[inline] unsafe fn from_base_index(base: NonNull, index: usize) -> Self { - let ptr = if mem::size_of::() == 0 { - // won't overflow because index must be less than length - (index + 1) as *mut T + // If mem::size_of::() != 0 then return a pointer to an `element` in + // the data part of the table (we start counting from "0", so that + // in the expression T[last], the "last" index actually one less than the + // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"): + // + // `from_base_index(base, 1).as_ptr()` returns a pointer that + // points here in the data part of the table + // (to the start of T1) + // | + // | `base: NonNull` must point here + // | (to the end of T0 or to the start of C0) + // v v + // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast + // ^ + // `from_base_index(base, 1)` returns a pointer + // that points here in the data part of the table + // (to the end of T1) + // + // where: T0...Tlast - our stored data; C0...Clast - control bytes + // or metadata for data. 
+ let ptr = if T::IS_ZERO_SIZED { + // won't overflow because index must be less than length (bucket_mask) + // and bucket_mask is guaranteed to be less than `isize::MAX` + // (see TableLayout::calculate_layout_for method) + invalid_mut(index + 1) } else { base.as_ptr().sub(index) }; @@ -302,27 +387,183 @@ impl Bucket { ptr: NonNull::new_unchecked(ptr), } } + + /// Calculates the index of a [`Bucket`] as distance between two pointers + /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`). + /// The returned value is in units of T: the distance in bytes divided by + /// [`core::mem::size_of::()`]. + /// + /// If the `T` is a ZST, then we return the index of the element in + /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`). + /// + /// This function is the inverse of [`from_base_index`]. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`. + /// + /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`] + /// method, as well as for the correct logic of the work of this crate, the + /// following rules are necessary and sufficient: + /// + /// * `base` contained pointer must not be `dangling` and must point to the + /// end of the first `element` from the `data part` of the table, i.e. + /// must be a pointer that returns by [`RawTable::data_end`] or by + /// [`RawTableInner::data_end`]; + /// + /// * `self` also must not contain dangling pointer; + /// + /// * both `self` and `base` must be created from the same [`RawTable`] + /// (or [`RawTableInner`]). + /// + /// If `mem::size_of::() == 0`, this function is always safe. + /// + /// [`Bucket`]: crate::raw::Bucket + /// [`from_base_index`]: crate::raw::Bucket::from_base_index + /// [`RawTable::data_end`]: crate::raw::RawTable::data_end + /// [`RawTableInner::data_end`]: RawTableInner::data_end + /// [`RawTable`]: crate::raw::RawTable + /// [`RawTableInner`]: RawTableInner + /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from #[inline] unsafe fn to_base_index(&self, base: NonNull) -> usize { - if mem::size_of::() == 0 { + // If mem::size_of::() != 0 then return an index under which we used to store the + // `element` in the data part of the table (we start counting from "0", so + // that in the expression T[last], the "last" index actually is one less than the + // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"). + // For example for 5th element in table calculation is performed like this: + // + // mem::size_of::() + // | + // | `self = from_base_index(base, 5)` that returns pointer + // | that points here in tha data part of the table + // | (to the end of T5) + // | | `base: NonNull` must point here + // v | (to the end of T0 or to the start of C0) + // /???\ v v + // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast + // \__________ __________/ + // \/ + // `bucket.to_base_index(base)` = 5 + // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::() + // + // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data. + if T::IS_ZERO_SIZED { + // this can not be UB self.ptr.as_ptr() as usize - 1 } else { offset_from(base.as_ptr(), self.ptr.as_ptr()) } } + + /// Acquires the underlying raw pointer `*mut T` to `data`. 
+ /// + /// # Note + /// + /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the + /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because + /// for properly dropping the data we also need to clear `data` control bytes. If we + /// drop data, but do not clear `data control byte` it leads to double drop when + /// [`RawTable`] goes out of scope. + /// + /// If you modify an already initialized `value`, so [`Hash`] and [`Eq`] on the new + /// `T` value and its borrowed form *must* match those for the old `T` value, as the map + /// will not re-evaluate where the new value should go, meaning the value may become + /// "lost" if their location does not reflect their state. + /// + /// [`RawTable`]: crate::raw::RawTable + /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "raw")] + /// # fn test() { + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::raw::{Bucket, RawTable}; + /// + /// type NewHashBuilder = core::hash::BuildHasherDefault; + /// + /// fn make_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let hash_builder = NewHashBuilder::default(); + /// let mut table = RawTable::new(); + /// + /// let value = ("a", 100); + /// let hash = make_hash(&hash_builder, &value.0); + /// + /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); + /// + /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap(); + /// + /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100)); + /// # } + /// # fn main() { + /// # #[cfg(feature = "raw")] + /// # test() + /// # } + /// ``` #[inline] pub fn as_ptr(&self) -> *mut T { - if mem::size_of::() == 0 { + if T::IS_ZERO_SIZED { // Just return an arbitrary ZST pointer which is properly aligned - mem::align_of::() as *mut T + // invalid pointer is good enough for ZST + invalid_mut(mem::align_of::()) } else { unsafe { self.ptr.as_ptr().sub(1) } } } + + /// Create a new [`Bucket`] that is offset from the `self` by the given + /// `offset`. The pointer calculation is performed by calculating the + /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`). + /// This function is used for iterators. + /// + /// `offset` is in units of `T`; e.g., a `offset` of 3 represents a pointer + /// offset of `3 * size_of::()` bytes. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the safety rules are directly derived + /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety + /// rules of [`NonNull::new_unchecked`] function. + /// + /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method + /// and [`NonNull::new_unchecked`] function, as well as for the correct + /// logic of the work of this crate, the following rules are necessary and + /// sufficient: + /// + /// * `self` contained pointer must not be `dangling`; + /// + /// * `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`, + /// i.e. 
`(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other + /// words, `self.to_base_index() + ofset + 1` must be no greater than the number returned + /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the + /// `self.to_base_index() + ofset` must not be greater than `RawTableInner.bucket_mask`, + /// i.e. `(self.to_base_index() + ofset) <= RawTableInner.bucket_mask` or, in other words, + /// `self.to_base_index() + ofset + 1` must be no greater than the number returned by the + /// function [`RawTable::buckets`] or [`RawTableInner::buckets`]. + /// + /// [`Bucket`]: crate::raw::Bucket + /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1 + /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked + /// [`RawTable::buckets`]: crate::raw::RawTable::buckets + /// [`RawTableInner::buckets`]: RawTableInner::buckets #[inline] unsafe fn next_n(&self, offset: usize) -> Self { - let ptr = if mem::size_of::() == 0 { - (self.ptr.as_ptr() as usize + offset) as *mut T + let ptr = if T::IS_ZERO_SIZED { + // invalid pointer is good enough for ZST + invalid_mut(self.ptr.as_ptr() as usize + offset) } else { self.ptr.as_ptr().sub(offset) }; @@ -330,26 +571,212 @@ impl Bucket { ptr: NonNull::new_unchecked(ptr), } } + + /// Executes the destructor (if any) of the pointed-to `data`. + /// + /// # Safety + /// + /// See [`ptr::drop_in_place`] for safety concerns. + /// + /// You should use [`RawTable::erase`] instead of this function, + /// or be careful with calling this function directly, because for + /// properly dropping the data we need also clear `data` control bytes. + /// If we drop data, but do not erase `data control byte` it leads to + /// double drop when [`RawTable`] goes out of scope. + /// + /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html + /// [`RawTable`]: crate::raw::RawTable + /// [`RawTable::erase`]: crate::raw::RawTable::erase #[cfg_attr(feature = "inline-more", inline)] - pub unsafe fn drop(&self) { + pub(crate) unsafe fn drop(&self) { self.as_ptr().drop_in_place(); } + + /// Reads the `value` from `self` without moving it. This leaves the + /// memory in `self` unchanged. + /// + /// # Safety + /// + /// See [`ptr::read`] for safety concerns. + /// + /// You should use [`RawTable::remove`] instead of this function, + /// or be careful with calling this function directly, because compiler + /// calls its destructor when readed `value` goes out of scope. It + /// can cause double dropping when [`RawTable`] goes out of scope, + /// because of not erased `data control byte`. + /// + /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html + /// [`RawTable`]: crate::raw::RawTable + /// [`RawTable::remove`]: crate::raw::RawTable::remove #[inline] - pub unsafe fn read(&self) -> T { + pub(crate) unsafe fn read(&self) -> T { self.as_ptr().read() } + + /// Overwrites a memory location with the given `value` without reading + /// or dropping the old value (like [`ptr::write`] function). + /// + /// # Safety + /// + /// See [`ptr::write`] for safety concerns. 
+ /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. + /// + /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html #[inline] - pub unsafe fn write(&self, val: T) { + pub(crate) unsafe fn write(&self, val: T) { self.as_ptr().write(val); } + + /// Returns a shared immutable reference to the `value`. + /// + /// # Safety + /// + /// See [`NonNull::as_ref`] for safety concerns. + /// + /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "raw")] + /// # fn test() { + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::raw::{Bucket, RawTable}; + /// + /// type NewHashBuilder = core::hash::BuildHasherDefault; + /// + /// fn make_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let hash_builder = NewHashBuilder::default(); + /// let mut table = RawTable::new(); + /// + /// let value: (&str, String) = ("A pony", "is a small horse".to_owned()); + /// let hash = make_hash(&hash_builder, &value.0); + /// + /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); + /// + /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap(); + /// + /// assert_eq!( + /// unsafe { bucket.as_ref() }, + /// &("A pony", "is a small horse".to_owned()) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "raw")] + /// # test() + /// # } + /// ``` #[inline] pub unsafe fn as_ref<'a>(&self) -> &'a T { &*self.as_ptr() } + + /// Returns a unique mutable reference to the `value`. + /// + /// # Safety + /// + /// See [`NonNull::as_mut`] for safety concerns. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. 
+ /// + /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "raw")] + /// # fn test() { + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::raw::{Bucket, RawTable}; + /// + /// type NewHashBuilder = core::hash::BuildHasherDefault; + /// + /// fn make_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let hash_builder = NewHashBuilder::default(); + /// let mut table = RawTable::new(); + /// + /// let value: (&str, String) = ("A pony", "is a small horse".to_owned()); + /// let hash = make_hash(&hash_builder, &value.0); + /// + /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0)); + /// + /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap(); + /// + /// unsafe { + /// bucket + /// .as_mut() + /// .1 + /// .push_str(" less than 147 cm at the withers") + /// }; + /// assert_eq!( + /// unsafe { bucket.as_ref() }, + /// &( + /// "A pony", + /// "is a small horse less than 147 cm at the withers".to_owned() + /// ) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "raw")] + /// # test() + /// # } + /// ``` #[inline] pub unsafe fn as_mut<'a>(&self) -> &'a mut T { &mut *self.as_ptr() } + + /// Copies `size_of` bytes from `other` to `self`. The source + /// and destination may *not* overlap. + /// + /// # Safety + /// + /// See [`ptr::copy_nonoverlapping`] for safety concerns. + /// + /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of + /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values + /// in the region beginning at `*self` and the region beginning at `*other` can + /// [violate memory safety]. + /// + /// # Note + /// + /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match + /// those for the old `T` value, as the map will not re-evaluate where the new + /// value should go, meaning the value may become "lost" if their location + /// does not reflect their state. + /// + /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html + /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html + /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value + /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html + /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html #[cfg(feature = "raw")] #[inline] pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) { @@ -358,15 +785,16 @@ impl Bucket { } /// A raw hash table with an unsafe API. -pub struct RawTable { - table: RawTableInner, +pub struct RawTable { + table: RawTableInner, + alloc: A, // Tell dropck that we own instances of T. marker: PhantomData, } /// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless /// of how many different key-value types are used. -struct RawTableInner { +struct RawTableInner { // Mask to get an index from a hash value. The value is one less than the // number of buckets in the table. 
bucket_mask: usize, @@ -380,8 +808,6 @@ struct RawTableInner { // Number of elements in the table, only really used by len() items: usize, - - alloc: A, } impl RawTable { @@ -393,7 +819,8 @@ impl RawTable { #[inline] pub const fn new() -> Self { Self { - table: RawTableInner::new_in(Global), + table: RawTableInner::NEW, + alloc: Global, marker: PhantomData, } } @@ -412,7 +839,9 @@ impl RawTable { } } -impl RawTable { +impl RawTable { + const TABLE_LAYOUT: TableLayout = TableLayout::new::(); + /// Creates a new empty hash table without allocating any memory, using the /// given allocator. /// @@ -420,9 +849,10 @@ impl RawTable { /// leave the data pointer dangling since that bucket is never written to /// due to our load factor forcing us to always have at least 1 free bucket. #[inline] - pub fn new_in(alloc: A) -> Self { + pub const fn new_in(alloc: A) -> Self { Self { - table: RawTableInner::new_in(alloc), + table: RawTableInner::NEW, + alloc, marker: PhantomData, } } @@ -440,73 +870,99 @@ impl RawTable { Ok(Self { table: RawTableInner::new_uninitialized( - alloc, - TableLayout::new::(), + &alloc, + Self::TABLE_LAYOUT, buckets, fallibility, )?, + alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table with at least enough capacity - /// for inserting the given number of elements without reallocating. - fn fallible_with_capacity( - alloc: A, - capacity: usize, - fallibility: Fallibility, - ) -> Result { + /// Attempts to allocate a new hash table using the given allocator, with at least enough + /// capacity for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { Ok(Self { table: RawTableInner::fallible_with_capacity( - alloc, - TableLayout::new::(), + &alloc, + Self::TABLE_LAYOUT, capacity, - fallibility, + Fallibility::Fallible, )?, + alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table using the given allocator, with at least enough - /// capacity for inserting the given number of elements without reallocating. - #[cfg(feature = "raw")] - pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { - Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) - } - /// Allocates a new hash table using the given allocator, with at least enough capacity for /// inserting the given number of elements without reallocating. pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { - Ok(capacity) => capacity, - Err(_) => unsafe { hint::unreachable_unchecked() }, + Self { + table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity), + alloc, + marker: PhantomData, } } /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { - &self.table.alloc + &self.alloc } - /// Deallocates the table without dropping any entries. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn free_buckets(&mut self) { - self.table.free_buckets(TableLayout::new::()); + /// Returns pointer to one past last `data` element in the the table as viewed from + /// the start point of the allocation. + /// + /// The caller must ensure that the `RawTable` outlives the returned [`NonNull`], + /// otherwise using it may result in [`undefined behavior`]. 
+ /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + pub fn data_end(&self) -> NonNull { + // SAFETY: `self.table.ctrl` is `NonNull`, so casting it is safe + // + // `self.table.ctrl.as_ptr().cast()` returns pointer that + // points here (to the end of `T0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTable::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + // with loading `Group` bytes from the heap works properly, even if the result + // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + // `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) } } - /// Returns pointer to one past last element of data table. + /// Returns pointer to start of data table. #[inline] - pub unsafe fn data_end(&self) -> NonNull { - NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) + #[cfg(any(feature = "raw", feature = "nightly"))] + pub unsafe fn data_start(&self) -> NonNull { + NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets())) } - /// Returns pointer to start of data table. + /// Return the information about memory allocated by the table. + /// + /// `RawTable` allocates single memory block to store both data and metadata. + /// This function returns allocation size and alignment and the beginning of the area. + /// These are the arguments which will be passed to `dealloc` when the table is dropped. + /// + /// This function might be useful for memory profiling. #[inline] - #[cfg(feature = "nightly")] - pub unsafe fn data_start(&self) -> *mut T { - self.data_end().as_ptr().wrapping_sub(self.buckets()) + #[cfg(feature = "raw")] + pub fn allocation_info(&self) -> (NonNull, Layout) { + // SAFETY: We use the same `table_layout` that was used to allocate + // this table. + unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) } } /// Returns the index of a bucket from a `Bucket`. @@ -516,8 +972,55 @@ impl RawTable { } /// Returns a pointer to an element in the table. + /// + /// The caller must ensure that the `RawTable` outlives the returned [`Bucket`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If `mem::size_of::() != 0`, then the caller of this function must observe the + /// following safety rules: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`. + /// + /// It is safe to call this function with index of zero (`index == 0`) on a table that has + /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. + /// + /// If `mem::size_of::() == 0`, then the only requirement is that the `index` must + /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. + /// `(index + 1) <= self.buckets()`. 
+ /// + /// [`RawTable::buckets`]: RawTable::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] pub unsafe fn bucket(&self, index: usize) -> Bucket { + // If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table + // (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"): + // + // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` + // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`) + // | + // | `base = self.data_end()` points here + // | (to the start of CT0 or to the end of T0) + // v v + // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + // ^ \__________ __________/ + // `table.bucket(3)` returns a pointer that points \/ + // here in the `data` part of the `RawTable` (to additional control bytes + // the end of T3) `m = Group::WIDTH - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`; + // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask` + // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`. debug_assert_ne!(self.table.bucket_mask, 0); debug_assert!(index < self.buckets()); Bucket::from_base_index(self.data_end(), index) @@ -525,8 +1028,7 @@ impl RawTable { /// Erases an element from the table without dropping it. #[cfg_attr(feature = "inline-more", inline)] - #[deprecated(since = "0.8.1", note = "use erase or remove instead")] - pub unsafe fn erase_no_drop(&mut self, item: &Bucket) { + unsafe fn erase_no_drop(&mut self, item: &Bucket) { let index = self.bucket_index(item); self.table.erase(index); } @@ -534,7 +1036,6 @@ impl RawTable { /// Erases an element from the table, dropping it in place. #[cfg_attr(feature = "inline-more", inline)] #[allow(clippy::needless_pass_by_value)] - #[allow(deprecated)] pub unsafe fn erase(&mut self, item: Bucket) { // Erase the element from the table first since drop might panic. self.erase_no_drop(&item); @@ -558,12 +1059,18 @@ impl RawTable { } /// Removes an element from the table, returning it. + /// + /// This also returns an `InsertSlot` pointing to the newly free bucket. #[cfg_attr(feature = "inline-more", inline)] #[allow(clippy::needless_pass_by_value)] - #[allow(deprecated)] - pub unsafe fn remove(&mut self, item: Bucket) -> T { + pub unsafe fn remove(&mut self, item: Bucket) -> (T, InsertSlot) { self.erase_no_drop(&item); - item.read() + ( + item.read(), + InsertSlot { + index: self.bucket_index(&item), + }, + ) } /// Finds and removes an element from the table, returning it. @@ -571,7 +1078,7 @@ impl RawTable { pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { // Avoid `Option::map` because it bloats LLVM IR. 
match self.find(hash, eq) { - Some(bucket) => Some(unsafe { self.remove(bucket) }), + Some(bucket) => Some(unsafe { self.remove(bucket).0 }), None => None, } } @@ -585,18 +1092,17 @@ impl RawTable { /// Removes all elements from the table without freeing the backing memory. #[cfg_attr(feature = "inline-more", inline)] pub fn clear(&mut self) { + if self.is_empty() { + // Special case empty table to avoid surprising O(capacity) time. + return; + } // Ensure that the table is reset even if one of the drops panic let mut self_ = guard(self, |self_| self_.clear_no_drop()); unsafe { - self_.drop_elements(); - } - } - - unsafe fn drop_elements(&mut self) { - if mem::needs_drop::() && !self.is_empty() { - for item in self.iter() { - item.drop(); - } + // SAFETY: ScopeGuard sets to zero the `items` field of the table + // even in case of panic during the dropping of the elements so + // that there will be no double drop of the elements. + self_.table.drop_elements::(); } } @@ -607,7 +1113,16 @@ impl RawTable { // space for. let min_size = usize::max(self.table.items, min_size); if min_size == 0 { - *self = Self::new_in(self.table.alloc.clone()); + let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } return; } @@ -624,14 +1139,33 @@ impl RawTable { if min_buckets < self.buckets() { // Fast path if the table is empty if self.table.items == 0 { - *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); + let new_inner = + RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size); + let mut old_inner = mem::replace(&mut self.table, new_inner); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); + } } else { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - if self - .resize(min_size, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } + unsafe { + // SAFETY: + // 1. We know for sure that `min_size >= self.table.items`. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose RawTable::new_uninitialized in a public API. + if self + .resize(min_size, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: The result of calling the `resize` function cannot be an error + // because `fallibility == Fallibility::Infallible. + hint::unreachable_unchecked() + } } } } @@ -641,13 +1175,18 @@ impl RawTable { /// without reallocation. #[cfg_attr(feature = "inline-more", inline)] pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { - if additional > self.table.growth_left { + if unlikely(additional > self.table.growth_left) { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. 
- if self - .reserve_rehash(additional, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } + unsafe { + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we will never expose RawTable::new_uninitialized in a public API. + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`. + hint::unreachable_unchecked() + } } } } @@ -661,28 +1200,45 @@ impl RawTable { hasher: impl Fn(&T) -> u64, ) -> Result<(), TryReserveError> { if additional > self.table.growth_left { - self.reserve_rehash(additional, hasher, Fallibility::Fallible) + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we will never expose RawTable::new_uninitialized in a public API. + unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) } } else { Ok(()) } } /// Out-of-line slow path for `reserve` and `try_reserve`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes, + /// otherwise calling this function results in [`undefined behavior`] + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[cold] #[inline(never)] - fn reserve_rehash( + unsafe fn reserve_rehash( &mut self, additional: usize, hasher: impl Fn(&T) -> u64, fallibility: Fallibility, ) -> Result<(), TryReserveError> { unsafe { + // SAFETY: + // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 2. The `drop` function is the actual drop function of the elements stored in + // the table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. self.table.reserve_rehash_inner( + &self.alloc, additional, &|table, index| hasher(table.bucket::(index).as_ref()), fallibility, - TableLayout::new::(), - if mem::needs_drop::() { + Self::TABLE_LAYOUT, + if T::NEEDS_DROP { Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) } else { None @@ -693,20 +1249,50 @@ impl RawTable { /// Allocates a new table of a different size and moves the contents of the /// current table into it. - fn resize( + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes, + /// otherwise calling this function results in [`undefined behavior`] + /// + /// The caller of this function must ensure that `capacity >= self.table.items` + /// otherwise: + /// + /// * If `self.table.items != 0`, calling of this function with `capacity` + /// equal to 0 (`capacity == 0`) results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and + /// `self.table.items > capacity_to_buckets(capacity)` + /// calling this function results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and + /// `self.table.items > capacity_to_buckets(capacity)` + /// calling this function are never return (will go into an + /// infinite loop). + /// + /// See [`RawTableInner::find_insert_slot`] for more information. 
+ /// + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn resize( &mut self, capacity: usize, hasher: impl Fn(&T) -> u64, fallibility: Fallibility, ) -> Result<(), TryReserveError> { - unsafe { - self.table.resize_inner( - capacity, - &|table, index| hasher(table.bucket::(index).as_ref()), - fallibility, - TableLayout::new::(), - ) - } + // SAFETY: + // 1. The caller of this function guarantees that `capacity >= self.table.items`. + // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. + self.table.resize_inner( + &self.alloc, + capacity, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + ) } /// Inserts a new element into the table, and returns its raw bucket. @@ -715,22 +1301,27 @@ impl RawTable { #[cfg_attr(feature = "inline-more", inline)] pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { unsafe { - let mut index = self.table.find_insert_slot(hash); + // SAFETY: + // 1. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose `RawTable::new_uninitialized` in a public API. + // + // 2. We reserve additional space (if necessary) right after calling this function. + let mut slot = self.table.find_insert_slot(hash); - // We can avoid growing the table once we have reached our load - // factor if we are replacing a tombstone. This works since the - // number of EMPTY slots does not change in this case. - let old_ctrl = *self.table.ctrl(index); + // We can avoid growing the table once we have reached our load factor if we are replacing + // a tombstone. This works since the number of EMPTY slots does not change in this case. + // + // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index + // in the range `0..=self.buckets()`. + let old_ctrl = *self.table.ctrl(slot.index); if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) { self.reserve(1, hasher); - index = self.table.find_insert_slot(hash); + // SAFETY: We know for sure that `RawTableInner` has control bytes + // initialized and that there is extra space in the table. + slot = self.table.find_insert_slot(hash); } - self.table.record_item_insert_at(index, old_ctrl, hash); - - let bucket = self.bucket(index); - bucket.write(value); - bucket + self.insert_in_slot(hash, slot, value) } } @@ -796,9 +1387,9 @@ impl RawTable { { let index = self.bucket_index(&bucket); let old_ctrl = *self.table.ctrl(index); - debug_assert!(is_full(old_ctrl)); + debug_assert!(self.is_bucket_full(index)); let old_growth_left = self.table.growth_left; - let item = self.remove(bucket); + let item = self.remove(bucket).0; if let Some(new_item) = f(item) { self.table.growth_left = old_growth_left; self.table.set_ctrl(index, old_ctrl); @@ -810,28 +1401,89 @@ impl RawTable { } } - /// Searches for an element in the table. + /// Searches for an element in the table. If the element is not found, + /// returns `Err` with the position of a slot where an element with the + /// same hash could be inserted. + /// + /// This function may resize the table if additional space is required for + /// inserting an element. 
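The `find_or_find_insert_slot`/`insert_in_slot` pair documented above enables a single-lookup upsert. Below is a standalone usage sketch assuming hashbrown is built with its `raw` feature; the `hash_one` and `upsert` helpers are illustrative names, not part of this patch.

```rust
// Upsert with one table lookup via find_or_find_insert_slot.
use core::hash::{BuildHasher, Hash, Hasher};
use hashbrown::raw::RawTable;

fn hash_one<S: BuildHasher, K: Hash>(s: &S, k: &K) -> u64 {
    let mut h = s.build_hasher();
    k.hash(&mut h);
    h.finish()
}

fn upsert(table: &mut RawTable<(u64, u64)>, s: &impl BuildHasher, key: u64, val: u64) {
    let hash = hash_one(s, &key);
    match table.find_or_find_insert_slot(hash, |&(k, _)| k == key, |&(k, _)| hash_one(s, &k)) {
        // Found: update the value in place through the bucket.
        Ok(bucket) => unsafe { bucket.as_mut().1 = val },
        // Not found: the slot stays valid because the table is not
        // mutated between the search and the insertion.
        Err(slot) => unsafe {
            table.insert_in_slot(hash, slot, (key, val));
        },
    }
}

fn main() {
    let s = std::collections::hash_map::RandomState::new();
    let mut t: RawTable<(u64, u64)> = RawTable::new();
    upsert(&mut t, &s, 1, 10);
    upsert(&mut t, &s, 1, 20);
    assert_eq!(t.len(), 1);
}
```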
#[inline] - pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { - let result = self.table.find_inner(hash, &mut |index| unsafe { - eq(self.bucket(index).as_ref()) - }); + pub fn find_or_find_insert_slot( + &mut self, + hash: u64, + mut eq: impl FnMut(&T) -> bool, + hasher: impl Fn(&T) -> u64, + ) -> Result, InsertSlot> { + self.reserve(1, hasher); - // Avoid `Option::map` because it bloats LLVM IR. - match result { - Some(index) => Some(unsafe { self.bucket(index) }), - None => None, + unsafe { + // SAFETY: + // 1. We know for sure that there is at least one empty `bucket` in the table. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will + // never expose `RawTable::new_uninitialized` in a public API. + // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket, + // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in + // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe. + match self + .table + .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref())) + { + // SAFETY: See explanation above. + Ok(index) => Ok(self.bucket(index)), + Err(slot) => Err(slot), + } } } - /// Gets a reference to an element in the table. + /// Inserts a new element into the table in the given slot, and returns its + /// raw bucket. + /// + /// # Safety + /// + /// `slot` must point to a slot previously returned by + /// `find_or_find_insert_slot`, and no mutation of the table must have + /// occurred since that call. #[inline] - pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { - // Avoid `Option::map` because it bloats LLVM IR. - match self.find(hash, eq) { - Some(bucket) => Some(unsafe { bucket.as_ref() }), - None => None, - } + pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket { + let old_ctrl = *self.table.ctrl(slot.index); + self.table.record_item_insert_at(slot.index, old_ctrl, hash); + + let bucket = self.bucket(slot.index); + bucket.write(value); + bucket + } + + /// Searches for an element in the table. + #[inline] + pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { + unsafe { + // SAFETY: + // 1. The [`RawTableInner`] must already have properly initialized control bytes since we + // will never expose `RawTable::new_uninitialized` in a public API. + // 1. The `find_inner` function returns the `index` of only the full bucket, which is in + // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref` + // is safe. + let result = self + .table + .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref())); + + // Avoid `Option::map` because it bloats LLVM IR. + match result { + // SAFETY: See explanation above. + Some(index) => Some(self.bucket(index)), + None => None, + } + } + } + + /// Gets a reference to an element in the table. + #[inline] + pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_ref() }), + None => None, + } } /// Gets a mutable reference to an element in the table. @@ -928,17 +1580,27 @@ impl RawTable { self.table.bucket_mask + 1 } + /// Checks whether the bucket at `index` is full. + /// + /// # Safety + /// + /// The caller must ensure `index` is less than the number of buckets. 
+ #[inline] + pub unsafe fn is_bucket_full(&self, index: usize) -> bool { + self.table.is_bucket_full(index) + } + /// Returns an iterator over every element in the table. It is up to /// the caller to ensure that the `RawTable` outlives the `RawIter`. /// Because we cannot make the `next` method unsafe on the `RawIter` /// struct, we have to make the `iter` method unsafe. #[inline] pub unsafe fn iter(&self) -> RawIter { - let data = Bucket::from_base_index(self.data_end(), 0); - RawIter { - iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), - items: self.table.items, - } + // SAFETY: + // 1. The caller must uphold the safety contract for `iter` method. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose RawTable::new_uninitialized in a public API. + self.table.iter() } /// Returns an iterator over occupied buckets that could match a given hash. @@ -952,7 +1614,7 @@ impl RawTable { /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> { + pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash { RawIterHash::new(self, hash) } @@ -978,8 +1640,8 @@ impl RawTable { debug_assert_eq!(iter.len(), self.len()); RawDrain { iter, - table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), - orig_table: NonNull::from(self), + table: mem::replace(&mut self.table, RawTableInner::NEW), + orig_table: NonNull::from(&mut self.table), marker: PhantomData, } } @@ -993,31 +1655,31 @@ impl RawTable { pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { debug_assert_eq!(iter.len(), self.len()); - let alloc = self.table.alloc.clone(); let allocation = self.into_allocation(); RawIntoIter { iter, allocation, marker: PhantomData, - alloc, } } /// Converts the table into a raw allocation. The contents of the table /// should be dropped using a `RawIter` before freeing the allocation. #[cfg_attr(feature = "inline-more", inline)] - pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout)> { + pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout, A)> { let alloc = if self.table.is_empty_singleton() { None } else { // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. - let (layout, ctrl_offset) = match calculate_layout::(self.table.buckets()) { - Some(lco) => lco, - None => unsafe { hint::unreachable_unchecked() }, - }; + let (layout, ctrl_offset) = + match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; Some(( unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, layout, + unsafe { ptr::read(&self.alloc) }, )) }; mem::forget(self); @@ -1025,41 +1687,62 @@ impl RawTable { } } -unsafe impl Send for RawTable +unsafe impl Send for RawTable where T: Send, A: Send, { } -unsafe impl Sync for RawTable +unsafe impl Sync for RawTable where T: Sync, A: Sync, { } -impl RawTableInner { +impl RawTableInner { + const NEW: Self = RawTableInner::new(); + + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never accessed + /// due to our load factor forcing us to always have at least 1 free bucket. 
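The "at least 1 free bucket" invariant mentioned above falls out of the load-factor arithmetic. The sketch below mirrors the shape of hashbrown's usual `bucket_mask_to_capacity` helper; the 87.5% figure is an assumption about this vendored version, since the helper itself is not shown in this hunk.

```rust
// Standalone sketch of the load-factor arithmetic (assumed 87.5% policy).

fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // Small tables keep one slot free.
        bucket_mask
    } else {
        // Larger tables keep 1/8 of the slots free.
        ((bucket_mask + 1) / 8) * 7
    }
}

fn main() {
    // A 16-bucket table holds at most 14 items before it must grow, so at
    // least one bucket is always EMPTY and probing is guaranteed to stop.
    assert_eq!(bucket_mask_to_capacity(16 - 1), 14);
    assert_eq!(bucket_mask_to_capacity(4 - 1), 3);
}
```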
#[inline] - const fn new_in(alloc: A) -> Self { + const fn new() -> Self { Self { // Be careful to cast the entire slice to a raw pointer. ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, bucket_mask: 0, items: 0, growth_left: 0, - alloc, } } } -impl RawTableInner { +impl RawTableInner { + /// Allocates a new [`RawTableInner`] with the given number of buckets. + /// The control bytes and buckets are left uninitialized. + /// + /// # Safety + /// + /// The caller of this function must ensure that the `buckets` is power of two + /// and also initialize all control bytes of the length `self.bucket_mask + 1 + + /// Group::WIDTH` with the [`EMPTY`] bytes. + /// + /// See also [`Allocator`] API for other safety concerns. + /// + /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new_uninitialized( - alloc: A, + unsafe fn new_uninitialized( + alloc: &A, table_layout: TableLayout, buckets: usize, fallibility: Fallibility, - ) -> Result { + ) -> Result + where + A: Allocator, + { debug_assert!(buckets.is_power_of_two()); // Avoid `Option::ok_or_else` because it bloats LLVM IR. @@ -1068,45 +1751,48 @@ impl RawTableInner { None => return Err(fallibility.capacity_overflow()), }; - // We need an additional check to ensure that the allocation doesn't - // exceed `isize::MAX`. We can skip this check on 64-bit systems since - // such allocations will never succeed anyways. - // - // This mirrors what Vec does in the standard library. - if mem::size_of::() < 8 && layout.size() > isize::MAX as usize { - return Err(fallibility.capacity_overflow()); - } - - let ptr: NonNull = match do_alloc(&alloc, layout) { + let ptr: NonNull = match do_alloc(alloc, layout) { Ok(block) => block.cast(), Err(_) => return Err(fallibility.alloc_err(layout)), }; + // SAFETY: null pointer will be caught in above check let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); Ok(Self { ctrl, bucket_mask: buckets - 1, items: 0, growth_left: bucket_mask_to_capacity(buckets - 1), - alloc, }) } + /// Attempts to allocate a new [`RawTableInner`] with at least enough + /// capacity for inserting the given number of elements without reallocating. + /// + /// All the control bytes are initialized with the [`EMPTY`] bytes. #[inline] - fn fallible_with_capacity( - alloc: A, + fn fallible_with_capacity( + alloc: &A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result { + ) -> Result + where + A: Allocator, + { if capacity == 0 { - Ok(Self::new_in(alloc)) + Ok(Self::NEW) } else { + // SAFETY: We checked that we could successfully allocate the new table, and then + // initialized all control bytes with the constant `EMPTY` byte. unsafe { let buckets = capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?; let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?; + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes()); Ok(result) @@ -1114,66 +1800,397 @@ impl RawTableInner { } } - /// Searches for an empty or deleted bucket which is suitable for inserting - /// a new element and sets the hash for that slot. 
+    /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
+    /// the given number of elements without reallocating.
+    ///
+    /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
+    /// in case of an allocation error. Use [`fallible_with_capacity`] instead if you want to
+    /// handle memory allocation failure.
+    ///
+    /// All the control bytes are initialized with the [`EMPTY`] bytes.
+    ///
+    /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
+    /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
+    fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
+    where
+        A: Allocator,
+    {
+        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
+        match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
+            Ok(table_inner) => table_inner,
+            // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
+            Err(_) => unsafe { hint::unreachable_unchecked() },
+        }
+    }
+
+    /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
+    ///
+    /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
+    /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately
+    /// trigger a match in the [`RawTableInner::find_insert_slot_in_group`] function. This is because
+    /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking
+    /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied.
+    /// We detect this situation here and perform a second scan starting at the beginning of the table.
+    /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
+    /// trailing control bytes (containing [`EMPTY`] bytes).
+    ///
+    /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
+    /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
+    /// `Safety`).
+    ///
+    /// # Warning
+    ///
+    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the table is less than
+    /// the group width (`self.buckets() < Group::WIDTH`), this function returns an index outside of the
+    /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that
+    /// index will cause immediate [`undefined behavior`].
+    ///
+    /// # Safety
+    ///
+    /// The safety rules are directly derived from the safety rules for the [`RawTableInner::ctrl`] method.
+    /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work
+    /// of this crate, the following rules are necessary and sufficient:
+    ///
+    /// * The [`RawTableInner`] must have properly initialized control bytes, otherwise calling this
+    ///   function results in [`undefined behavior`].
+    ///
+    /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
+    ///   (after the `find_insert_slot_in_group` function, but before insertion into the table).
+    ///
+    /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
+    ///   (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
+    ///
+    /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
+    /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
+    /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
+    ///
+    /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
+    /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    #[inline]
+    unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
+        // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
+        if unlikely(self.is_bucket_full(index)) {
+            debug_assert!(self.bucket_mask < Group::WIDTH);
+            // SAFETY:
+            //
+            // * Since the caller of this function ensures that the control bytes are properly
+            //   initialized and `ptr = self.ctrl(0)` points to the start of the array of control
+            //   bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
+            //   and points to the properly initialized control bytes (see also
+            //   `TableLayout::calculate_layout_for` and `ptr::read`);
+            //
+            // * Because the caller of this function ensures that the index was provided by the
+            //   `self.find_insert_slot_in_group()` function, so for tables larger than the
+            //   group width (self.buckets() >= Group::WIDTH), we will never end up in the given
+            //   branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
+            //   cannot return a full bucket index. For tables smaller than the group width, calling
+            //   the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
+            //   the range of the table are filled with EMPTY bytes (and we know for sure that there
+            //   is at least one FULL bucket), so this second scan either finds an empty slot (due to
+            //   the load factor) or hits the trailing control bytes (containing EMPTY).
+            index = Group::load_aligned(self.ctrl(0))
+                .match_empty_or_deleted()
+                .lowest_set_bit()
+                .unwrap_unchecked();
+        }
+        InsertSlot { index }
+    }
+
+    /// Finds the position to insert something in a group.
+    ///
+    /// **This may have false positives and must be fixed up with `fix_insert_slot`
+    /// before it's used.**
+    ///
+    /// The function is guaranteed to return the index of an empty or deleted [`Bucket`]
+    /// in the range `0..self.buckets()` (`0..=self.bucket_mask`).
+    #[inline]
+    fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
+        let bit = group.match_empty_or_deleted().lowest_set_bit();

+        if likely(bit.is_some()) {
+            // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+            Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
+        } else {
+            None
+        }
+    }
+
+    /// Searches for an element in the table, or a potential slot where that element could
+    /// be inserted (an empty or deleted [`Bucket`] index).
+    ///
+    /// This uses dynamic dispatch to reduce the amount of code generated, but that is
+    /// eliminated by LLVM optimizations.
+    ///
+    /// This function does not make any changes to the `data` part of the table, or any
+    /// changes to the `items` or `growth_left` field of the table.
     ///
-    /// There must be at least 1 empty bucket in the table.
+    /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
+    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
+    /// will never return (will go into an infinite loop) for tables larger than the group
+    /// width, or return an index outside of the table indices range if the table is less
+    /// than the group width.
+    ///
+    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
+    /// function with only `FULL` buckets' indices and to return the `index` of the found
+    /// element (as `Ok(index)`). If the element is not found and there is at least 1
+    /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
+    /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
+    /// if this function returns [`InsertSlot`], it will contain an index in the range
+    /// `0..=self.buckets()`.
+    ///
+    /// # Safety
+    ///
+    /// The [`RawTableInner`] must have properly initialized control bytes, otherwise calling
+    /// this function results in [`undefined behavior`].
+    ///
+    /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is
+    /// less than the group width and if there was not at least one empty or deleted bucket in
+    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
+    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+    /// control bytes outside the table range.
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
-    unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
-        let index = self.find_insert_slot(hash);
+    unsafe fn find_or_find_insert_slot_inner(
+        &self,
+        hash: u64,
+        eq: &mut dyn FnMut(usize) -> bool,
+    ) -> Result<usize, InsertSlot> {
+        let mut insert_slot = None;
+
+        let h2_hash = h2(hash);
+        let mut probe_seq = self.probe_seq(hash);
+
+        loop {
+            // SAFETY:
+            // * Caller of this function ensures that the control bytes are properly initialized.
+            //
+            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+            //   of the table due to masking with `self.bucket_mask` and also because the number of
+            //   buckets is a power of two (see `self.probe_seq` function).
+            //
+            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+            //   call `Group::load` due to the extended control bytes range, which is
+            //   `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+            //   byte will never be read for the allocated table);
+            //
+            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+            //   bytes, which is safe (see RawTableInner::new).
+            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
+
+            for bit in group.match_byte(h2_hash) {
+                let index = (probe_seq.pos + bit) & self.bucket_mask;
+
+                if likely(eq(index)) {
+                    return Ok(index);
+                }
+            }
+
+            // We didn't find the element we were looking for in the group, try to get an
+            // insertion slot from the group if we don't have one yet.
+            if likely(insert_slot.is_none()) {
+                insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
+            }
+
+            // Only stop the search if the group contains at least one empty element.
+            // Otherwise, the element that we are looking for might be in a following group.
+            if likely(group.match_empty().any_bit_set()) {
+                // We must have found an insert slot by now, since the current group contains at
+                // least one. For tables smaller than the group width, there will still be an
+                // empty element in the current (and only) group due to the load factor.
+                unsafe {
+                    // SAFETY:
+                    // * Caller of this function ensures that the control bytes are properly initialized.
+                    //
+                    // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
+                    return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
+                }
+            }
+
+            probe_seq.move_next(self.bucket_mask);
+        }
+    }
+
+    /// Searches for an empty or deleted bucket which is suitable for inserting a new
+    /// element and sets the hash for that slot. Returns an index of that slot and the
+    /// old control byte stored in the found index.
+    ///
+    /// This function does not check if the given element exists in the table. Also,
+    /// this function does not check if there is enough space in the table to insert
+    /// a new element. The caller of the function must ensure that the table has at
+    /// least 1 empty or deleted `bucket`, otherwise this function will never return
+    /// (will go into an infinite loop) for tables larger than the group width, or
+    /// return an index outside of the table indices range if the table is less than
+    /// the group width.
+    ///
+    /// If there is at least 1 empty or deleted `bucket` in the table, the function is
+    /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
+    /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
+    ///
+    /// This function does not make any changes to the `data` parts of the table,
+    /// or any changes to the `items` or `growth_left` field of the table.
+    ///
+    /// # Safety
+    ///
+    /// The safety rules are directly derived from the safety rules for the
+    /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods.
+    /// Thus, in order to uphold the safety contracts for those methods, as well as for
+    /// the correct logic of the work of this crate, you must observe the following rules
+    /// when calling this function:
+    ///
+    /// * The [`RawTableInner`] has already been allocated and has properly initialized
+    ///   control bytes, otherwise calling this function results in [`undefined behavior`].
+    ///
+    /// * The caller of this function must ensure that the "data" parts of the table
+    ///   will have an entry in the returned index (matching the given hash) right
+    ///   after calling this function.
+    ///
+    /// Attempt to write data at the `index` returned by this function when the table is
+    /// less than the group width and if there was not at least one empty or deleted bucket in
+    /// the table will cause immediate [`undefined behavior`]. This is because in this case the
+    /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+    /// control bytes outside the table range.
+    ///
+    /// The caller must independently increase the `items` field of the table, and also,
+    /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
+    /// field, and do not change it if the old control byte was [`DELETED`].
+    ///
+    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
+    /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
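For orientation, the control byte that `set_ctrl_h2` writes is the `h2` fragment of the hash, while `h1` picks the probe start. A sketch of that split (the shift below matches hashbrown's scheme of taking the top 7 bits of a 64-bit hash; treat the exact constants as illustrative):

```rust
fn main() {
    let hash: u64 = 0x6F6F_DEAD_BEEF_CAFE;
    let bucket_mask = 127usize; // 128 buckets
    let h1 = (hash as usize) & bucket_mask; // starting probe position
    let h2 = ((hash >> (64 - 7)) & 0x7F) as u8; // stored in the control byte
    assert_eq!(h2 & 0x80, 0); // FULL control bytes keep the high bit clear
    println!("probe starts at {h1}, control byte {h2:#04x}");
}
```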
+ /// + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`RawTableInner::ctrl`]: RawTableInner::ctrl + /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2 + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + #[inline] + unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) { + // SAFETY: Caller of this function ensures that the control bytes are properly initialized. + let index: usize = self.find_insert_slot(hash).index; + // SAFETY: + // 1. The `find_insert_slot` function either returns an `index` less than or + // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never + // returns if it cannot find an empty or deleted slot. + // 2. The caller of this function guarantees that the table has already been + // allocated let old_ctrl = *self.ctrl(index); self.set_ctrl_h2(index, hash); (index, old_ctrl) } /// Searches for an empty or deleted bucket which is suitable for inserting - /// a new element. + /// a new element, returning the `index` for the new [`Bucket`]. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise this function + /// will never return (will go into an infinite loop) for tables larger than the group + /// width, or return an index outside of the table indices range if the table is less + /// than the group width. + /// + /// If there is at least 1 empty or deleted `bucket` in the table, the function is + /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`, + /// but in any case, if this function returns [`InsertSlot`], it will contain an index + /// in the range `0..=self.buckets()`. /// - /// There must be at least 1 empty bucket in the table. + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY] + /// control bytes outside the table range. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - fn find_insert_slot(&self, hash: u64) -> usize { + unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot { let mut probe_seq = self.probe_seq(hash); loop { - unsafe { - let group = Group::load(self.ctrl(probe_seq.pos)); - if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() { - let result = (probe_seq.pos + bit) & self.bucket_mask; - - // In tables smaller than the group width, trailing control - // bytes outside the range of the table are filled with - // EMPTY entries. These will unfortunately trigger a - // match, but once masked may point to a full bucket that - // is already occupied. We detect this situation here and - // perform a second scan starting at the beginning of the - // table. This second scan is guaranteed to find an empty - // slot (due to the load factor) before hitting the trailing - // control bytes (containing EMPTY). 
-                if unlikely(is_full(*self.ctrl(result))) {
-                    debug_assert!(self.bucket_mask < Group::WIDTH);
-                    debug_assert_ne!(probe_seq.pos, 0);
-                    return Group::load_aligned(self.ctrl(0))
-                        .match_empty_or_deleted()
-                        .lowest_set_bit_nonzero();
-                }
+            // SAFETY:
+            // * Caller of this function ensures that the control bytes are properly initialized.
+            //
+            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+            //   of the table due to masking with `self.bucket_mask` and also because the number of
+            //   buckets is a power of two (see `self.probe_seq` function).
+            //
+            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+            //   call `Group::load` due to the extended control bytes range, which is
+            //   `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+            //   byte will never be read for the allocated table);
+            //
+            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+            //   bytes, which is safe (see RawTableInner::new).
+            let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };

-                return result;
+            let index = self.find_insert_slot_in_group(&group, &probe_seq);
+            if likely(index.is_some()) {
+                // SAFETY:
+                // * Caller of this function ensures that the control bytes are properly initialized.
+                //
+                // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
+                unsafe {
+                    return self.fix_insert_slot(index.unwrap_unchecked());
+                }
             }

             probe_seq.move_next(self.bucket_mask);
         }
     }

-    /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
-    /// code generated, but it is eliminated by LLVM optimizations.
-    #[inline]
-    fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
+    /// Searches for an element in a table, returning the `index` of the found element.
+    /// This uses dynamic dispatch to reduce the amount of code generated, but it is
+    /// eliminated by LLVM optimizations.
+    ///
+    /// This function does not make any changes to the `data` part of the table, or any
+    /// changes to the `items` or `growth_left` field of the table.
+    ///
+    /// The table must have at least 1 empty `bucket`, otherwise, if the
+    /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
+    /// this function will also never return (will go into an infinite loop).
+    ///
+    /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
+    /// function with only `FULL` buckets' indices and to return the `index` of the found
+    /// element as `Some(index)`, so the index will always be in the range
+    /// `0..self.buckets()`.
+    ///
+    /// # Safety
+    ///
+    /// The [`RawTableInner`] must have properly initialized control bytes, otherwise calling
+    /// this function results in [`undefined behavior`].
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    #[inline(always)]
+    unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
         let h2_hash = h2(hash);
         let mut probe_seq = self.probe_seq(hash);

         loop {
+            // SAFETY:
+            // * Caller of this function ensures that the control bytes are properly initialized.
+            //
+            // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+            //   of the table due to masking with `self.bucket_mask`.
+            //
+            // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+            //   call `Group::load` due to the extended control bytes range, which is
+            //   `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+            //   byte will never be read for the allocated table);
+            //
+            // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+            //   always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+            //   bytes, which is safe (see RawTableInner::new).
             let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };

             for bit in group.match_byte(h2_hash) {
+                // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+                // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
                 let index = (probe_seq.pos + bit) & self.bucket_mask;

                 if likely(eq(index)) {
@@ -1189,12 +2206,52 @@ impl RawTableInner {
         }
     }

+    /// Prepares for rehashing data in place (that is, without allocating new memory).
+    /// Converts all full index `control bytes` to `DELETED` and all `DELETED` control
+    /// bytes to `EMPTY`, i.e. performs the following conversion:
+    ///
+    /// - `EMPTY` control bytes -> `EMPTY`;
+    /// - `DELETED` control bytes -> `EMPTY`;
+    /// - `FULL` control bytes -> `DELETED`.
+    ///
+    /// This function does not make any changes to the `data` parts of the table,
+    /// or any changes to the `items` or `growth_left` field of the table.
+    ///
+    /// # Safety
+    ///
+    /// You must observe the following safety rules when calling this function:
+    ///
+    /// * The [`RawTableInner`] has already been allocated;
+    ///
+    /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
+    ///   bytes when re-inserting them into their ideal position (which was impossible
+    ///   to do during the first insert due to tombstones). If the caller does not do
+    ///   this, then calling this function may result in a memory leak.
+    ///
+    /// * The [`RawTableInner`] must have properly initialized control bytes, otherwise
+    ///   calling this function results in [`undefined behavior`].
+    ///
+    /// Calling this function on a table that has not been allocated results in
+    /// [`undefined behavior`].
+    ///
+    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
+    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+    ///
+    /// [`Bucket::as_ptr`]: Bucket::as_ptr
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[allow(clippy::mut_mut)]
     #[inline]
     unsafe fn prepare_rehash_in_place(&mut self) {
-        // Bulk convert all full control bytes to DELETED, and all DELETED
-        // control bytes to EMPTY. This effectively frees up all buckets
-        // containing a DELETED entry.
+        // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
+        // This effectively frees up all buckets containing a DELETED entry.
+        //
+        // SAFETY:
+        // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
+        // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
+        //    due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
+        // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
+        // 4.
We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0 + // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for). for i in (0..self.buckets()).step_by(Group::WIDTH) { let group = Group::load_aligned(self.ctrl(i)); let group = group.convert_special_to_empty_and_full_to_deleted(); @@ -1203,15 +2260,245 @@ impl RawTableInner { // Fix up the trailing control bytes. See the comments in set_ctrl // for the handling of tables smaller than the group width. - if self.buckets() < Group::WIDTH { + // + // SAFETY: The caller of this function guarantees that [`RawTableInner`] + // has already been allocated + if unlikely(self.buckets() < Group::WIDTH) { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe self.ctrl(0) .copy_to(self.ctrl(Group::WIDTH), self.buckets()); } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe self.ctrl(0) .copy_to(self.ctrl(self.buckets()), Group::WIDTH); } } + /// Returns an iterator over every element in the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result + /// is [`undefined behavior`]: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `RawIter`. Because we cannot make the `next` method unsafe on + /// the `RawIter` struct, we have to make the `iter` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// The type `T` must be the actual type of the elements stored in the table, + /// otherwise using the returned [`RawIter`] results in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn iter(&self) -> RawIter { + // SAFETY: + // 1. Since the caller of this function ensures that the control bytes + // are properly initialized and `self.data_end()` points to the start + // of the array of control bytes, therefore: `ctrl` is valid for reads, + // properly aligned to `Group::WIDTH` and points to the properly initialized + // control bytes. + // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e. + // equal to zero). + // 3. We pass the exact value of buckets of the table to the function. + // + // `ctrl` points here (to the start + // of the first control byte `CT0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + // with loading `Group` bytes from the heap works properly, even if the result + // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + // `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. 
+        let data = Bucket::from_base_index(self.data_end(), 0);
+        RawIter {
+            // SAFETY: See explanation above
+            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
+            items: self.items,
+        }
+    }
+
+    /// Executes the destructors (if any) of the values stored in the table.
+    ///
+    /// # Note
+    ///
+    /// This function does not erase the control bytes of the table and does
+    /// not make any changes to the `items` or `growth_left` fields of the
+    /// table. If necessary, the caller of this function must manually set
+    /// up these table fields, for example using the [`clear_no_drop`] function.
+    ///
+    /// Be careful when calling this function, because the drop function of
+    /// the elements can panic, and this can leave the table in an inconsistent
+    /// state.
+    ///
+    /// # Safety
+    ///
+    /// The type `T` must be the actual type of the elements stored in the table,
+    /// otherwise calling this function may result in [`undefined behavior`].
+    ///
+    /// If `T` is a type that should be dropped and **the table is not empty**,
+    /// calling this function more than once results in [`undefined behavior`].
+    ///
+    /// If `T` is not [`Copy`], attempting to use values stored in the table after
+    /// calling this function may result in [`undefined behavior`].
+    ///
+    /// It is safe to call this function on a table that has not been allocated,
+    /// on a table with uninitialized control bytes, and on a table with no actual
+    /// data but with `Full` control bytes if `self.items == 0`.
+    ///
+    /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information
+    /// about properly removing or saving `element` from / into the [`RawTable`] /
+    /// [`RawTableInner`].
+    ///
+    /// [`Bucket::drop`]: Bucket::drop
+    /// [`Bucket::as_ptr`]: Bucket::as_ptr
+    /// [`clear_no_drop`]: RawTableInner::clear_no_drop
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    unsafe fn drop_elements<T>(&mut self) {
+        // Check that `self.items != 0`. Protects against the possibility
+        // of creating an iterator on a table with uninitialized control bytes.
+        if T::NEEDS_DROP && self.items != 0 {
+            // SAFETY: We know for sure that RawTableInner will outlive the
+            // returned `RawIter` iterator, and the caller of this function
+            // must uphold the safety contract for `drop_elements` method.
+            for item in self.iter::<T>() {
+                // SAFETY: The caller must uphold the safety contract for
+                // `drop_elements` method.
+                item.drop();
+            }
+        }
+    }
+
+    /// Executes the destructors (if any) of the values stored in the table and then
+    /// deallocates the table.
+    ///
+    /// # Note
+    ///
+    /// Calling this function automatically makes invalid (dangling) all instances of
+    /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table.
+    ///
+    /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
+    /// fields of the table. If necessary, the caller of this function must manually set
+    /// up these table fields.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+    ///
+    /// * Calling this function more than once;
+    ///
+    /// * The type `T` must be the actual type of the elements stored in the table.
+    ///
+    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+    ///   to allocate this table.
+    ///
+    /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
+    ///   was used to allocate this table.
+    ///
+    /// The caller of this function should pay attention to the possibility of the
+    /// elements' drop function panicking, because this:
+    ///
+    /// * May leave the table in an inconsistent state;
+    ///
+    /// * Memory is never deallocated, so a memory leak may occur.
+    ///
+    /// Attempt to use the `ctrl` field of the table (dereference) after calling this
+    /// function results in [`undefined behavior`].
+    ///
+    /// It is safe to call this function on a table that has not been allocated,
+    /// on a table with uninitialized control bytes, and on a table with no actual
+    /// data but with `Full` control bytes if `self.items == 0`.
+    ///
+    /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
+    /// for more information.
+    ///
+    /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
+    /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
+        if !self.is_empty_singleton() {
+            unsafe {
+                // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
+                self.drop_elements::<T>();
+                // SAFETY:
+                // 1. We have checked that our table is allocated.
+                // 2. The caller must uphold the safety contract for `drop_inner_table` method.
+                self.free_buckets(alloc, table_layout);
+            }
+        }
+    }
+
+    /// Returns a pointer to an element in the table (convenience for
+    /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
+    ///
+    /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket`],
+    /// otherwise using it may result in [`undefined behavior`].
+    ///
+    /// # Safety
+    ///
+    /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
+    /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
+    /// this function, the following safety rules must be observed:
+    ///
+    /// * The table must already be allocated;
+    ///
+    /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
+    ///   function, i.e. `(index + 1) <= self.buckets()`.
+    ///
+    /// * The type `T` must be the actual type of the elements stored in the table, otherwise
+    ///   using the returned [`Bucket`] may result in [`undefined behavior`].
+    ///
+    /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
+    /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
+    ///
+    /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
+    /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
+    /// `(index + 1) <= self.buckets()`.
+    ///
+    /// ```none
+    /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+    /// (we start counting from "0", so that in the expression T[n], the "n" index is actually one less than
+    /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
+    ///
+    /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
+    /// part of the `RawTableInner`, i.e.
to the start of T3 (see [`Bucket::as_ptr`]) + /// | + /// | `base = table.data_end::()` points here + /// | (to the start of CT0 or to the end of T0) + /// v v + /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + /// ^ \__________ __________/ + /// `table.bucket(3)` returns a pointer that points \/ + /// here in the `data` part of the `RawTableInner` additional control bytes + /// (to the end of T3) `m = Group::WIDTH - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`; + /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` + /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. + /// + /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`Bucket::from_base_index`]: Bucket::from_base_index + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn bucket(&self, index: usize) -> Bucket { debug_assert_ne!(self.bucket_mask, 0); @@ -1219,6 +2506,52 @@ impl RawTableInner { Bucket::from_base_index(self.data_end(), index) } + /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table + /// (convenience for `self.data_end::().as_ptr().sub((index + 1) * size_of)`). + /// + /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`, + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`; + /// + /// * The `size_of` must be equal to the size of the elements stored in the table; + /// + /// ```none + /// If mem::size_of::() != 0 then return a pointer to the `element` in the `data part` of the table + /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): + /// + /// `table.bucket_ptr(3, mem::size_of::())` returns a pointer that points here in the + /// `data` part of the `RawTableInner`, i.e. to the start of T3 + /// | + /// | `base = table.data_end::()` points here + /// | (to the start of CT0 or to the end of T0) + /// v v + /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + /// \__________ __________/ + /// \/ + /// additional control bytes + /// `m = Group::WIDTH - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`; + /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` + /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. + /// + /// P.S. 
`h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+    /// ```
+    ///
+    /// [`RawTableInner::buckets`]: RawTableInner::buckets
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
     unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
         debug_assert_ne!(self.bucket_mask, 0);
@@ -1227,9 +2560,47 @@ impl RawTableInner {
         base.sub((index + 1) * size_of)
     }

+    /// Returns a pointer to one past the last `data` element in the table as viewed from
+    /// the start point of the allocation (convenience for `self.ctrl.cast()`).
+    ///
+    /// This function actually returns a pointer to the end of the `data element` at
+    /// index "0" (zero).
+    ///
+    /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull`],
+    /// otherwise using it may result in [`undefined behavior`].
+    ///
+    /// # Note
+    ///
+    /// The type `T` must be the actual type of the elements stored in the table, otherwise
+    /// using the returned [`NonNull`] may result in [`undefined behavior`].
+    ///
+    /// ```none
+    /// `table.data_end::<T>()` returns a pointer that points here
+    /// (to the end of `T0`)
+    ///          ∨
+    /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+    ///                           \________ ________/
+    ///                                    \/
+    ///       `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+    ///
+    /// where: T0...T_n - our stored data;
+    ///        CT0...CT_n - control bytes or metadata for `data`.
+    ///        CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+    ///                        with loading `Group` bytes from the heap works properly, even if the result
+    ///                        of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+    ///                        `RawTableInner::set_ctrl` function.
+    ///
+    /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+    /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+    /// ```
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
-    unsafe fn data_end<T>(&self) -> NonNull<T> {
-        NonNull::new_unchecked(self.ctrl.as_ptr().cast())
+    fn data_end<T>(&self) -> NonNull<T> {
+        unsafe {
+            // SAFETY: `self.ctrl` is `NonNull`, so casting it is safe
+            NonNull::new_unchecked(self.ctrl.as_ptr().cast())
+        }
     }

     /// Returns an iterator-like object for a probe sequence on the table.
@@ -1240,6 +2611,8 @@ impl RawTableInner {
     #[inline]
     fn probe_seq(&self, hash: u64) -> ProbeSeq {
         ProbeSeq {
+            // This is the same as `hash as usize % self.buckets()` because the number
+            // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
             pos: h1(hash) & self.bucket_mask,
             stride: 0,
         }
@@ -1250,7 +2623,7 @@ impl RawTableInner {
     #[cfg(feature = "raw")]
     #[inline]
     unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
-        let index = self.find_insert_slot(hash);
+        let index = self.find_insert_slot(hash).index;
         let old_ctrl = *self.ctrl(index);
         if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
             Err(())
@@ -1277,13 +2650,68 @@ impl RawTableInner {

     /// Sets a control byte to the hash, and possibly also the replicated control byte at
     /// the end of the array.
+    ///
+    /// This function does not make any changes to the `data` parts of the table,
+    /// or any changes to the `items` or `growth_left` field of the table.
+    ///
+    /// # Safety
+    ///
+    /// The safety rules are directly derived from the safety rules for the [`RawTableInner::set_ctrl`]
+    /// method. Thus, in order to uphold the safety contracts for that method, you must observe the
+    /// following rules when calling this function:
+    ///
+    /// * The [`RawTableInner`] has already been allocated;
+    ///
+    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
+    ///
+    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+    ///
+    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
+    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+    ///
+    /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
+    /// [`RawTableInner::buckets`]: RawTableInner::buckets
+    /// [`Bucket::as_ptr`]: Bucket::as_ptr
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
-    unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
+    unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
+        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`]
         self.set_ctrl(index, h2(hash));
     }

+    /// Replaces the hash in the control byte at the given index with the provided one,
+    /// and possibly also replicates the new control byte at the end of the array of control
+    /// bytes, returning the old control byte.
+    ///
+    /// This function does not make any changes to the `data` parts of the table,
+    /// or any changes to the `items` or `growth_left` field of the table.
+    ///
+    /// # Safety
+    ///
+    /// The safety rules are directly derived from the safety rules for the [`RawTableInner::set_ctrl_h2`]
+    /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
+    /// methods, you must observe the following rules when calling this function:
+    ///
+    /// * The [`RawTableInner`] has already been allocated;
+    ///
+    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
+    ///
+    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+    ///
+    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
+    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+    ///
+    /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
+    /// [`RawTableInner::buckets`]: RawTableInner::buckets
+    /// [`Bucket::as_ptr`]: Bucket::as_ptr
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
-    unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
+    unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
+        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`]
         let prev_ctrl = *self.ctrl(index);
         self.set_ctrl_h2(index, hash);
         prev_ctrl
@@ -1291,10 +2719,35 @@ impl RawTableInner {

     /// Sets a control byte, and possibly also the replicated control byte at
     /// the end of the array.
+    ///
+    /// This function does not make any changes to the `data` parts of the table,
+    /// or any changes to the `items` or `growth_left` field of the table.
+    ///
+    /// # Safety
+    ///
+    /// You must observe the following safety rules when calling this function:
+    ///
+    /// * The [`RawTableInner`] has already been allocated;
+    ///
+    /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
+    ///
+    /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+    ///
+    /// See also [`Bucket::as_ptr`] method, for more information about properly removing
+    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+    ///
+    /// [`RawTableInner::buckets`]: RawTableInner::buckets
+    /// [`Bucket::as_ptr`]: Bucket::as_ptr
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[inline]
-    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
+    unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
         // Replicate the first Group::WIDTH control bytes at the end of
-        // the array without using a branch:
+        // the array without using a branch. If the table is smaller than
+        // the group width (self.buckets() < Group::WIDTH), then
+        // `index2 = Group::WIDTH + index`, otherwise `index2` is:
+        //
         // - If index >= Group::WIDTH then index == index2.
         // - Otherwise index2 == self.bucket_mask + 1 + index.
         //
@@ -1311,16 +2764,43 @@ impl RawTableInner {
         // ---------------------------------------------
         // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
         // ---------------------------------------------
+
+        // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
+        // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
         let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;

+        // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
         *self.ctrl(index) = ctrl;
         *self.ctrl(index2) = ctrl;
     }

     /// Returns a pointer to a control byte.
+    ///
+    /// # Safety
+    ///
+    /// For the allocated [`RawTableInner`], the result is [`Undefined Behavior`]
+    /// if the `index` is greater than the `self.bucket_mask + 1 + Group::WIDTH`.
+    /// In that case, calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
+    /// will return a pointer to the end of the allocated table and it is useless on its own.
+    ///
+    /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
+    /// table that has not been allocated results in [`Undefined Behavior`].
+    ///
+    /// So to satisfy both requirements you should always follow the rule that
+    /// `index < self.bucket_mask + 1 + Group::WIDTH`.
+    ///
+    /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
+    /// for read-only purposes.
+    ///
+    /// See also [`Bucket::as_ptr()`] method, for more information about properly removing
+    /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ /// + /// [`Bucket::as_ptr()`]: Bucket::as_ptr() + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn ctrl(&self, index: usize) -> *mut u8 { debug_assert!(index < self.num_ctrl_bytes()); + // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] self.ctrl.as_ptr().add(index) } @@ -1329,6 +2809,17 @@ impl RawTableInner { self.bucket_mask + 1 } + /// Checks whether the bucket at `index` is full. + /// + /// # Safety + /// + /// The caller must ensure `index` is less than the number of buckets. + #[inline] + unsafe fn is_bucket_full(&self, index: usize) -> bool { + debug_assert!(index < self.buckets()); + is_full(*self.ctrl(index)) + } + #[inline] fn num_ctrl_bytes(&self) -> usize { self.bucket_mask + 1 + Group::WIDTH @@ -1339,25 +2830,45 @@ impl RawTableInner { self.bucket_mask == 0 } + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating, + /// and return it inside ScopeGuard to protect against panic in the hash + /// function. + /// + /// # Note + /// + /// It is recommended (but not required): + /// + /// * That the new table's `capacity` be greater than or equal to `self.items`. + /// + /// * The `alloc` is the same [`Allocator`] as the `Allocator` used + /// to allocate this table. + /// + /// * The `table_layout` is the same [`TableLayout`] as the `TableLayout` used + /// to allocate this table. + /// + /// If `table_layout` does not match the `TableLayout` that was used to allocate + /// this table, then using `mem::swap` with the `self` and the new table returned + /// by this function results in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::mut_mut)] #[inline] - unsafe fn prepare_resize( + fn prepare_resize<'a, A>( &self, + alloc: &'a A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result, TryReserveError> { + ) -> Result, TryReserveError> + where + A: Allocator, + { debug_assert!(self.items <= capacity); // Allocate and initialize the new table. - let mut new_table = RawTableInner::fallible_with_capacity( - self.alloc.clone(), - table_layout, - capacity, - fallibility, - )?; - new_table.growth_left -= self.items; - new_table.items = self.items; + let new_table = + RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?; // The hash function may panic, in which case we simply free the new // table without dropping any elements that may have been copied into @@ -1367,7 +2878,11 @@ impl RawTableInner { // the comment at the bottom of this function. Ok(guard(new_table, move |self_| { if !self_.is_empty_singleton() { - self_.free_buckets(table_layout); + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. We know for sure that the `alloc` and `table_layout` matches the + // [`Allocator`] and [`TableLayout`] used to allocate this table. + unsafe { self_.free_buckets(alloc, table_layout) }; } })) } @@ -1376,16 +2891,38 @@ impl RawTableInner { /// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used + /// to allocate this table. 
+ /// + /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` + /// used to allocate this table. + /// + /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of + /// the elements stored in the table. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[inline(always)] - unsafe fn reserve_rehash_inner( + unsafe fn reserve_rehash_inner( &mut self, + alloc: &A, additional: usize, hasher: &dyn Fn(&mut Self, usize) -> u64, fallibility: Fallibility, layout: TableLayout, drop: Option, - ) -> Result<(), TryReserveError> { + ) -> Result<(), TryReserveError> + where + A: Allocator, + { // Avoid `Option::ok_or_else` because it bloats LLVM IR. let new_items = match self.items.checked_add(additional) { Some(new_items) => new_items, @@ -1395,12 +2932,30 @@ impl RawTableInner { if new_items <= full_capacity / 2 { // Rehash in-place without re-allocating if we have plenty of spare // capacity that is locked up due to DELETED entries. + + // SAFETY: + // 1. We know for sure that `[`RawTableInner`]` has already been allocated + // (since new_items <= full_capacity / 2); + // 2. The caller ensures that `drop` function is the actual drop function of + // the elements stored in the table. + // 3. The caller ensures that `layout` matches the [`TableLayout`] that was + // used to allocate this table. + // 4. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. self.rehash_in_place(hasher, layout.size, drop); Ok(()) } else { // Otherwise, conservatively resize to at least the next size up // to avoid churning deletes into frequent rehashes. + // + // SAFETY: + // 1. We know for sure that `capacity >= self.items`. + // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. self.resize_inner( + alloc, usize::max(new_items, full_capacity + 1), hasher, fallibility, @@ -1409,48 +2964,160 @@ impl RawTableInner { } } + /// Returns an iterator over full buckets indices in the table. + /// + /// # Safety + /// + /// Behavior is undefined if any of the following conditions are violated: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `FullBucketsIndices`. Because we cannot make the `next` method + /// unsafe on the `FullBucketsIndices` struct, we have to make the + /// `full_buckets_indices` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + #[inline(always)] + unsafe fn full_buckets_indices(&self) -> FullBucketsIndices { + // SAFETY: + // 1. Since the caller of this function ensures that the control bytes + // are properly initialized and `self.ctrl(0)` points to the start + // of the array of control bytes, therefore: `ctrl` is valid for reads, + // properly aligned to `Group::WIDTH` and points to the properly initialized + // control bytes. + // 2. The value of `items` is equal to the amount of data (values) added + // to the table. + // + // `ctrl` points here (to the start + // of the first control byte `CT0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. 
`RawTableInner::buckets() - 1`
+        //
+        // where: T0...T_n - our stored data;
+        //        CT0...CT_n - control bytes or metadata for `data`.
+        let ctrl = NonNull::new_unchecked(self.ctrl(0));

+        FullBucketsIndices {
+            // Load the first group
+            // SAFETY: See explanation above.
+            current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
+            group_first_index: 0,
+            ctrl,
+            items: self.items,
+        }
+    }
+
     /// Allocates a new table of a different size and moves the contents of the
     /// current table into it.
     ///
     /// This uses dynamic dispatch to reduce the amount of
     /// code generated, but it is eliminated by LLVM optimizations when inlined.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is
+    /// [`undefined behavior`]:
+    ///
+    /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
+    ///   to allocate this table;
+    ///
+    /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
+    ///   used to allocate this table;
+    ///
+    /// * The [`RawTableInner`] must have properly initialized control bytes.
+    ///
+    /// The caller of this function must ensure that `capacity >= self.items`,
+    /// otherwise:
+    ///
+    /// * If `self.items != 0`, calling this function with `capacity == 0`
+    ///   results in [`undefined behavior`].
+    ///
+    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
+    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
+    ///   results in [`undefined behavior`].
+    ///
+    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
+    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
+    ///   never returns (it goes into an infinite loop).
+    ///
+    /// Note: It is recommended (but not required) that the new table's `capacity`
+    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
+    /// this function can never return. See [`RawTableInner::find_insert_slot`] for
+    /// more information.
+    ///
+    /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
     #[allow(clippy::inline_always)]
     #[inline(always)]
-    unsafe fn resize_inner(
+    unsafe fn resize_inner<A>(
         &mut self,
+        alloc: &A,
         capacity: usize,
         hasher: &dyn Fn(&mut Self, usize) -> u64,
         fallibility: Fallibility,
         layout: TableLayout,
-    ) -> Result<(), TryReserveError> {
-        let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
-
-        // Copy all elements to the new table.
-        for i in 0..self.buckets() {
-            if !is_full(*self.ctrl(i)) {
-                continue;
-            }
-
+    ) -> Result<(), TryReserveError>
+    where
+        A: Allocator,
+    {
+        // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`]
+        // that were used to allocate this table.
+        let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
+
+        // SAFETY: We know for sure that RawTableInner will outlive the
+        // returned `FullBucketsIndices` iterator, and the caller of this
+        // function ensures that the control bytes are properly initialized.
+        for full_byte_index in self.full_buckets_indices() {
             // This may panic.
-            let hash = hasher(self, i);
+            let hash = hasher(self, full_byte_index);

+            // SAFETY:
             // We can use a simpler version of insert() here since:
-            // - there are no DELETED entries.
-            // - we know there is enough space in the table.
-            // - all elements are unique.
-            let (index, _) = new_table.prepare_insert_slot(hash);
-
+            // 1. There are no DELETED entries.
+            // 2.
We know there is enough space in the table. + // 3. All elements are unique. + // 4. The caller of this function guarantees that `capacity > 0` + // so `new_table` must already have some allocated memory. + // 5. We set `growth_left` and `items` fields of the new table + // after the loop. + // 6. We insert into the table, at the returned index, the data + // matching the given hash immediately after calling this function. + let (new_index, _) = new_table.prepare_insert_slot(hash); + + // SAFETY: + // + // * `src` is valid for reads of `layout.size` bytes, since the + // table is alive and the `full_byte_index` is guaranteed to be + // within bounds (see `FullBucketsIndices::next_impl`); + // + // * `dst` is valid for writes of `layout.size` bytes, since the + // caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate old table and we have the `new_index` + // returned by `prepare_insert_slot`. + // + // * Both `src` and `dst` are properly aligned. + // + // * Both `src` and `dst` point to different region of memory. ptr::copy_nonoverlapping( - self.bucket_ptr(i, layout.size), - new_table.bucket_ptr(index, layout.size), + self.bucket_ptr(full_byte_index, layout.size), + new_table.bucket_ptr(new_index, layout.size), layout.size, ); } + // The hash function didn't panic, so we can safely set the + // `growth_left` and `items` fields of the new table. + new_table.growth_left -= self.items; + new_table.items = self.items; + // We successfully copied all elements without panicking. Now replace // self with the new table. The old table will have its memory freed but // the items will not be dropped (since they have been moved into the // new table). + // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate this table. mem::swap(self, &mut new_table); Ok(()) @@ -1459,10 +3126,25 @@ impl RawTableInner { /// Rehashes the contents of the table in place (i.e. without changing the /// allocation). /// - /// If `hasher` panics then some the table's contents may be lost. + /// If `hasher` panics then some the table's contents may be lost. + /// + /// This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * The `size_of` must be equal to the size of the elements stored in the table; + /// + /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of + /// the elements stored in the table. + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. /// - /// This uses dynamic dispatch to reduce the amount of - /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[cfg_attr(feature = "inline-more", inline(always))] #[cfg_attr(not(feature = "inline-more"), inline)] @@ -1506,8 +3188,10 @@ impl RawTableInner { let hash = hasher(*guard, i); // Search for a suitable place to put it - let new_i = guard.find_insert_slot(hash); - let new_i_p = guard.bucket_ptr(new_i, size_of); + // + // SAFETY: Caller of this function ensures that the control bytes + // are properly initialized. 
+ let new_i = guard.find_insert_slot(hash).index; // Probing works by scanning through all of the control // bytes in groups, which may not be aligned to the group @@ -1519,6 +3203,8 @@ impl RawTableInner { continue 'outer; } + let new_i_p = guard.bucket_ptr(new_i, size_of); + // We are moving the current item to a new position. Write // our H2 to the control byte of the new position. let prev_ctrl = guard.replace_ctrl_h2(new_i, hash); @@ -1545,17 +3231,107 @@ impl RawTableInner { mem::forget(guard); } + /// Deallocates the table without dropping any entries. + /// + /// # Note + /// + /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements), + /// else it can lead to leaking of memory. Also calling this function automatically + /// makes invalid (dangling) all instances of buckets ([`Bucket`]) and makes invalid + /// (dangling) the `ctrl` field of the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`Undefined Behavior`]: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used + /// to allocate this table. + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used + /// to allocate this table. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate + #[inline] + unsafe fn free_buckets(&mut self, alloc: &A, table_layout: TableLayout) + where + A: Allocator, + { + // SAFETY: The caller must uphold the safety contract for `free_buckets` + // method. + let (ptr, layout) = self.allocation_info(table_layout); + alloc.deallocate(ptr, layout); + } + + /// Returns a pointer to the allocated memory and the layout that was used to + /// allocate the table. + /// + /// # Safety + /// + /// Caller of this function must observe the following safety rules: + /// + /// * The [`RawTableInner`] has already been allocated, otherwise + /// calling this function results in [`undefined behavior`] + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` + /// that was used to allocate this table. Failure to comply with this condition + /// may result in [`undefined behavior`]. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate #[inline] - unsafe fn free_buckets(&mut self, table_layout: TableLayout) { + unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull, Layout) { + debug_assert!( + !self.is_empty_singleton(), + "this function can only be called on non-empty tables" + ); + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. 
         let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
             Some(lco) => lco,
-            None => hint::unreachable_unchecked(),
+            None => unsafe { hint::unreachable_unchecked() },
         };
-        self.alloc.deallocate(
-            NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
+        (
+            // SAFETY: The caller must uphold the safety contract for the `allocation_info` method.
+            unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
             layout,
-        );
+        )
+    }
+
+    /// Returns a pointer to the allocated memory and the layout that was used to
+    /// allocate the table. If the [`RawTableInner`] has not been allocated, this
+    /// function returns a dangling pointer and a `()` (unit) layout.
+    ///
+    /// # Safety
+    ///
+    /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
+    /// that was used to allocate this table. Failure to comply with this condition
+    /// may result in [`undefined behavior`].
+    ///
+    /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+    ///
+    /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+    /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
+    #[cfg(feature = "raw")]
+    unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
+        if self.is_empty_singleton() {
+            (NonNull::dangling(), Layout::new::<()>())
+        } else {
+            // SAFETY:
+            // 1. We have checked that our table is allocated.
+            // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
+            //    that was used to allocate this table.
+            unsafe { self.allocation_info(table_layout) }
+        }
     }
 
     /// Marks all table buckets as empty without dropping their contents.
@@ -1570,27 +3346,95 @@ impl<A: Allocator + Clone> RawTableInner<A> {
         self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
     }
 
+    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket
+    /// is no longer reported as full, decreases the `items` count of the table and,
+    /// if possible, increases `self.growth_left`.
+    ///
+    /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
+    /// does not make any changes to the `data` parts of the table. The caller of this
+    /// function must take care to properly drop the `data`, otherwise calling this
+    /// function may result in a memory leak.
+    ///
+    /// # Safety
+    ///
+    /// You must observe the following safety rules when calling this function:
+    ///
+    /// * The [`RawTableInner`] has already been allocated;
+    ///
+    /// * The control byte at the given position must be full;
+    ///
+    /// * The `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+    ///   `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+    ///   be no greater than the number returned by the function [`RawTableInner::buckets`].
+    ///
+    /// Calling this function on a table that has not been allocated results in
+    /// [`undefined behavior`].
+    ///
+    /// Calling this function on a table with no elements is unspecified, and calling
+    /// subsequent functions is likely to result in [`undefined behavior`] due to the
+    /// subtraction overflowing (`self.items -= 1` overflows when `self.items == 0`).
+    ///
+    /// See also the [`Bucket::as_ptr`] method for more information about properly
+    /// removing or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
+ /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn erase(&mut self, index: usize) { - debug_assert!(is_full(*self.ctrl(index))); + debug_assert!(self.is_bucket_full(index)); + + // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because + // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; + // SAFETY: + // - The caller must uphold the safety contract for `erase` method; + // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask` let empty_before = Group::load(self.ctrl(index_before)).match_empty(); let empty_after = Group::load(self.ctrl(index)).match_empty(); - // If we are inside a continuous block of Group::WIDTH full or deleted - // cells then a probe window may have seen a full block when trying to - // insert. We therefore need to keep that block non-empty so that - // lookups will continue searching to the next probe window. + // Inserting and searching in the map is performed by two key functions: // - // Note that in this context `leading_zeros` refers to the bytes at the - // end of a group, while `trailing_zeros` refers to the bytes at the - // beginning of a group. + // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED` + // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED` + // slot immediately in the first group, it jumps to the next `Group` looking for it, + // and so on until it has gone through all the groups in the control bytes. + // + // - The `find_inner` function that looks for the index of the desired element by looking + // at all the `FULL` bytes in the group. If it did not find the element right away, and + // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot` + // function may have found a suitable slot in the next group. Therefore, `find_inner` + // jumps further, and if it does not find the desired element and again there is no `EMPTY` + // byte, then it jumps further, and so on. The search stops only if `find_inner` function + // finds the desired element or hits an `EMPTY` slot/byte. + // + // Accordingly, this leads to two consequences: + // + // - The map must have `EMPTY` slots (bytes); + // + // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner` + // function may stumble upon an `EMPTY` byte before finding the desired element and stop + // searching. + // + // Thus it is necessary to check all bytes after and before the erased element. If we are in + // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes + // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as + // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there + // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble + // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well. + // + // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index` + // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH) + // cannot have `DELETED` bytes. 
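+        //
+        // To make the rule above concrete, suppose (hypothetically, for
+        // illustration only) that `Group::WIDTH == 4` and the control bytes
+        // around the erased index look like:
+        //
+        //     ... EMPTY FULL FULL [erased] FULL EMPTY ...
+        //
+        // There are 2 non-`EMPTY` bytes directly before the erased byte and 2
+        // non-`EMPTY` bytes starting at it, so a probe window of `Group::WIDTH`
+        // bytes could have covered exactly that run without seeing an `EMPTY`
+        // byte; the erased byte must therefore become `DELETED`. Were the run
+        // shorter than `Group::WIDTH`, every possible probe window over it would
+        // contain an `EMPTY` byte, and the byte could safely become `EMPTY`.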
+        //
+        // Note that in this context `leading_zeros` refers to the bytes at the end of
+        // a group, while `trailing_zeros` refers to the bytes at the beginning of a group.
         let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
             DELETED
         } else {
             self.growth_left += 1;
             EMPTY
         };
+        // SAFETY: the caller must uphold the safety contract for the `erase` method.
         self.set_ctrl(index, ctrl);
         self.items -= 1;
     }
 }
 
@@ -1599,12 +3443,16 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
     fn clone(&self) -> Self {
         if self.table.is_empty_singleton() {
-            Self::new_in(self.table.alloc.clone())
+            Self::new_in(self.alloc.clone())
         } else {
             unsafe {
                 // Avoid `Result::ok_or_else` because it bloats LLVM IR.
-                let new_table = match Self::new_uninitialized(
-                    self.table.alloc.clone(),
+                //
+                // SAFETY: This is safe as we are taking the size of an already allocated table,
+                // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
+                // of two, and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
+                let mut new_table = match Self::new_uninitialized(
+                    self.alloc.clone(),
                     self.table.buckets(),
                     Fallibility::Infallible,
                 ) {
                     Ok(table) => table,
                     Err(_) => hint::unreachable_unchecked(),
                 };
@@ -1612,24 +3460,32 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
-                // If cloning fails then we need to free the allocation for the
-                // new table. However we don't run its drop since its control
-                // bytes are not initialized yet.
-                let mut guard = guard(ManuallyDrop::new(new_table), |new_table| {
-                    new_table.free_buckets();
-                });
-
-                guard.clone_from_spec(self);
-
-                // Disarm the scope guard and return the newly created table.
-                ManuallyDrop::into_inner(ScopeGuard::into_inner(guard))
+                // Cloning elements may fail (the clone function may panic). But we don't
+                // need to worry about uninitialized control bits, since:
+                // 1. The number of items (elements) in the table is zero, which means that
+                //    the control bits will not be read by the `Drop` function.
+                // 2. The `clone_from_spec` method will first copy all control bits from
+                //    `self` (thus initializing them). But this will not affect the `Drop`
+                //    function, since the `clone_from_spec` function sets `items` only after
+                //    successfully cloning all elements.
+                new_table.clone_from_spec(self);
+                new_table
             }
         }
     }
 
     fn clone_from(&mut self, source: &Self) {
         if source.table.is_empty_singleton() {
-            *self = Self::new_in(self.table.alloc.clone());
+            let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
+            unsafe {
+                // SAFETY:
+                // 1. We call the function only once;
+                // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+                //    and [`TableLayout`] that were used to allocate this table.
+                // 3. If any element's drop function panics, then there will only be a memory leak,
+                //    because we have replaced the inner table with a new one.
+                old_inner.drop_inner_table::<T, A>(&self.alloc, Self::TABLE_LAYOUT);
+            }
         } else {
             unsafe {
                 // Make sure that if a panic occurs, we clear the table and
@@ -1644,27 +3500,38 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
                 //
                 // This leak is unavoidable: we can't try dropping more elements
                 // since this could lead to another panic and abort the process.
-                self_.drop_elements();
+                //
+                // SAFETY: If something goes wrong, we clear our table right after
+                // dropping the elements, so there is no double drop, since `items`
+                // will be equal to zero.
+                self_.table.drop_elements::<T>();
 
                 // If necessary, resize our table to match the source.
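                 // (The bucket counts must match exactly so that `clone_from_spec`
                 // below can copy the control bytes and the data over one-to-one.)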
if self_.buckets() != source.buckets() { - // Skip our drop by using ptr::write. - if !self_.table.is_empty_singleton() { - self_.free_buckets(); + let new_inner = match RawTableInner::new_uninitialized( + &self_.alloc, + Self::TABLE_LAYOUT, + source.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + // Replace the old inner with new uninitialized one. It's ok, since if something gets + // wrong `ScopeGuard` will initialize all control bytes and leave empty table. + let mut old_inner = mem::replace(&mut self_.table, new_inner); + if !old_inner.is_empty_singleton() { + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. We know for sure that `alloc` and `table_layout` matches + // the [`Allocator`] and [`TableLayout`] that were used to allocate this table. + old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT); } - (&mut **self_ as *mut Self).write( - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::new_uninitialized( - self_.table.alloc.clone(), - source.buckets(), - Fallibility::Infallible, - ) { - Ok(table) => table, - Err(_) => hint::unreachable_unchecked(), - }, - ); } + // Cloning elements may fail (the clone function may panic), but the `ScopeGuard` + // inside the `clone_from_impl` function will take care of that, dropping all + // cloned elements if necessary. Our `ScopeGuard` will clear the table. self_.clone_from_spec(source); // Disarm the scope guard if cloning was successful. @@ -1696,7 +3563,8 @@ impl RawTableClone for RawTable { .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); source .data_start() - .copy_to_nonoverlapping(self.data_start(), self.table.buckets()); + .as_ptr() + .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets()); self.table.items = source.table.items; self.table.growth_left = source.table.growth_left; @@ -1720,9 +3588,9 @@ impl RawTable { // to make sure we drop only the elements that have been // cloned so far. let mut guard = guard((0, &mut *self), |(index, self_)| { - if mem::needs_drop::() && !self_.is_empty() { + if T::NEEDS_DROP { for i in 0..=*index { - if is_full(*self_.table.ctrl(i)) { + if self_.is_bucket_full(i) { self_.bucket(i).drop(); } } @@ -1757,7 +3625,7 @@ impl RawTable { { self.clear(); - let guard_self = guard(&mut *self, |self_| { + let mut guard_self = guard(&mut *self, |self_| { // Clear the partially copied table if a panic occurs, otherwise // items and growth_left will be out of sync with the contents // of the table. @@ -1790,7 +3658,7 @@ impl RawTable { } } -impl Default for RawTable { +impl Default for RawTable { #[inline] fn default() -> Self { Self::new_in(Default::default()) @@ -1798,31 +3666,41 @@ impl Default for RawTable { } #[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable { +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { - if !self.table.is_empty_singleton() { - unsafe { - self.drop_elements(); - self.free_buckets(); - } + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. 
If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. + self.table + .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); } } } #[cfg(not(feature = "nightly"))] -impl Drop for RawTable { +impl Drop for RawTable { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { - if !self.table.is_empty_singleton() { - unsafe { - self.drop_elements(); - self.free_buckets(); - } + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. + self.table + .drop_inner_table::(&self.alloc, Self::TABLE_LAYOUT); } } } -impl IntoIterator for RawTable { +impl IntoIterator for RawTable { type Item = T; type IntoIter = RawIntoIter; @@ -1840,7 +3718,7 @@ impl IntoIterator for RawTable { pub(crate) struct RawIterRange { // Mask of full buckets in the current group. Bits are cleared from this // mask as each element is processed. - current_group: BitMask, + current_group: BitMaskIter, // Pointer to the buckets for the current group. data: Bucket, @@ -1856,19 +3734,44 @@ pub(crate) struct RawIterRange { impl RawIterRange { /// Returns a `RawIterRange` covering a subset of a table. /// - /// The control byte address must be aligned to the group size. + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`; + /// + /// * `ctrl` must be properly aligned to the group size (Group::WIDTH); + /// + /// * `ctrl` must point to the array of properly initialized control bytes; + /// + /// * `data` must be the [`Bucket`] at the `ctrl` index in the table; + /// + /// * the value of `len` must be less than or equal to the number of table buckets, + /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())` + /// must be positive. + /// + /// * The `ctrl.add(len)` pointer must be either in bounds or one + /// byte past the end of the same [allocated table]. + /// + /// * The `len` must be a power of two. 
+ /// + /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[cfg_attr(feature = "inline-more", inline)] unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { debug_assert_ne!(len, 0); debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); + // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] let end = ctrl.add(len); // Load the first group and advance ctrl to point to the next group + // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] let current_group = Group::load_aligned(ctrl).match_full(); let next_ctrl = ctrl.add(Group::WIDTH); Self { - current_group, + current_group: current_group.into_iter(), data, next_ctrl, end, @@ -1925,8 +3828,7 @@ impl RawIterRange { #[cfg_attr(feature = "inline-more", inline)] unsafe fn next_impl(&mut self) -> Option> { loop { - if let Some(index) = self.current_group.lowest_set_bit() { - self.current_group = self.current_group.remove_lowest_bit(); + if let Some(index) = self.current_group.next() { return Some(self.data.next_n(index)); } @@ -1939,7 +3841,7 @@ impl RawIterRange { // than the group size where the trailing control bytes are all // EMPTY. On larger tables self.end is guaranteed to be aligned // to the group size (since tables are power-of-two sized). - self.current_group = Group::load_aligned(self.next_ctrl).match_full(); + self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter(); self.data = self.data.next_n(Group::WIDTH); self.next_ctrl = self.next_ctrl.add(Group::WIDTH); } @@ -2016,7 +3918,7 @@ impl RawIter { /// This method should be called _before_ the removal is made. It is not necessary to call this /// method if you are removing an item that this iterator yielded in the past. #[cfg(feature = "raw")] - pub fn reflect_remove(&mut self, b: &Bucket) { + pub unsafe fn reflect_remove(&mut self, b: &Bucket) { self.reflect_toggle_full(b, false); } @@ -2030,36 +3932,76 @@ impl RawIter { /// /// This method should be called _after_ the given insert is made. #[cfg(feature = "raw")] - pub fn reflect_insert(&mut self, b: &Bucket) { + pub unsafe fn reflect_insert(&mut self, b: &Bucket) { self.reflect_toggle_full(b, true); } /// Refresh the iterator so that it reflects a change to the state of the given bucket. #[cfg(feature = "raw")] - fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { - unsafe { - if b.as_ptr() > self.iter.data.as_ptr() { - // The iterator has already passed the bucket's group. - // So the toggle isn't relevant to this iterator. - return; + unsafe fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { + if b.as_ptr() > self.iter.data.as_ptr() { + // The iterator has already passed the bucket's group. + // So the toggle isn't relevant to this iterator. + return; + } + + if self.iter.next_ctrl < self.iter.end + && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() + { + // The iterator has not yet reached the bucket's group. + // We don't need to reload anything, but we do need to adjust the item count. + + if cfg!(debug_assertions) { + // Double-check that the user isn't lying to us by checking the bucket state. + // To do that, we need to find its control byte. 
We know that self.iter.data is + // at self.iter.next_ctrl - Group::WIDTH, so we work from there: + let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); + // This method should be called _before_ a removal, or _after_ an insert, + // so in both cases the ctrl byte should indicate that the bucket is full. + assert!(is_full(*ctrl)); } - if self.iter.next_ctrl < self.iter.end - && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() - { - // The iterator has not yet reached the bucket's group. - // We don't need to reload anything, but we do need to adjust the item count. + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } - if cfg!(debug_assertions) { - // Double-check that the user isn't lying to us by checking the bucket state. - // To do that, we need to find its control byte. We know that self.iter.data is - // at self.iter.next_ctrl - Group::WIDTH, so we work from there: - let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); - let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); - // This method should be called _before_ a removal, or _after_ an insert, - // so in both cases the ctrl byte should indicate that the bucket is full. - assert!(is_full(*ctrl)); - } + return; + } + + // The iterator is at the bucket group that the toggled bucket is in. + // We need to do two things: + // + // - Determine if the iterator already yielded the toggled bucket. + // If it did, we're done. + // - Otherwise, update the iterator cached group so that it won't + // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. + // We'll also need to update the item count accordingly. + if let Some(index) = self.iter.current_group.0.lowest_set_bit() { + let next_bucket = self.iter.data.next_n(index); + if b.as_ptr() > next_bucket.as_ptr() { + // The toggled bucket is "before" the bucket the iterator would yield next. We + // therefore don't need to do anything --- the iterator has already passed the + // bucket in question. + // + // The item count must already be correct, since a removal or insert "prior" to + // the iterator's position wouldn't affect the item count. + } else { + // The removed bucket is an upcoming bucket. We need to make sure it does _not_ + // get yielded, and also that it's no longer included in the item count. + // + // NOTE: We can't just reload the group here, both since that might reflect + // inserts we've already passed, and because that might inadvertently unset the + // bits for _other_ removals. If we do that, we'd have to also decrement the + // item count for those other bits that we unset. But the presumably subsequent + // call to reflect for those buckets might _also_ decrement the item count. + // Instead, we _just_ flip the bit for the particular bucket the caller asked + // us to reflect. + let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let was_full = self.iter.current_group.flip(our_bit); + debug_assert_ne!(was_full, is_insert); if is_insert { self.items += 1; @@ -2067,65 +4009,23 @@ impl RawIter { self.items -= 1; } - return; - } - - // The iterator is at the bucket group that the toggled bucket is in. - // We need to do two things: - // - // - Determine if the iterator already yielded the toggled bucket. - // If it did, we're done. - // - Otherwise, update the iterator cached group so that it won't - // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. 
- // We'll also need to update the item count accordingly. - if let Some(index) = self.iter.current_group.lowest_set_bit() { - let next_bucket = self.iter.data.next_n(index); - if b.as_ptr() > next_bucket.as_ptr() { - // The toggled bucket is "before" the bucket the iterator would yield next. We - // therefore don't need to do anything --- the iterator has already passed the - // bucket in question. - // - // The item count must already be correct, since a removal or insert "prior" to - // the iterator's position wouldn't affect the item count. - } else { - // The removed bucket is an upcoming bucket. We need to make sure it does _not_ - // get yielded, and also that it's no longer included in the item count. - // - // NOTE: We can't just reload the group here, both since that might reflect - // inserts we've already passed, and because that might inadvertently unset the - // bits for _other_ removals. If we do that, we'd have to also decrement the - // item count for those other bits that we unset. But the presumably subsequent - // call to reflect for those buckets might _also_ decrement the item count. - // Instead, we _just_ flip the bit for the particular bucket the caller asked - // us to reflect. - let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); - let was_full = self.iter.current_group.flip(our_bit); - debug_assert_ne!(was_full, is_insert); - - if is_insert { - self.items += 1; + if cfg!(debug_assertions) { + if b.as_ptr() == next_bucket.as_ptr() { + // The removed bucket should no longer be next + debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index)); } else { - self.items -= 1; - } - - if cfg!(debug_assertions) { - if b.as_ptr() == next_bucket.as_ptr() { - // The removed bucket should no longer be next - debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index)); - } else { - // We should not have changed what bucket comes next. - debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index)); - } + // We should not have changed what bucket comes next. + debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index)); } } - } else { - // We must have already iterated past the removed item. } + } else { + // We must have already iterated past the removed item. } } unsafe fn drop_elements(&mut self) { - if mem::needs_drop::() && self.len() != 0 { + if T::NEEDS_DROP && self.items != 0 { for item in self { item.drop(); } @@ -2159,9 +4059,8 @@ impl Iterator for RawIter { self.iter.next_impl::() }; - if nxt.is_some() { - self.items -= 1; - } + debug_assert!(nxt.is_some()); + self.items -= 1; nxt } @@ -2175,28 +4074,146 @@ impl Iterator for RawIter { impl ExactSizeIterator for RawIter {} impl FusedIterator for RawIter {} +/// Iterator which returns an index of every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding index of that bucket. +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator. +/// - The order in which the iterator yields indices of the buckets is unspecified +/// and may change in the future. 
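+///
+/// As an illustrative sketch (using this module's internal APIs, cf. how
+/// `resize_inner` above consumes it), the yielded indices are combined with
+/// `bucket_ptr` to reach the corresponding data:
+///
+/// ```ignore
+/// // SAFETY: the table outlives the iterator and its control bytes
+/// // are fully initialized.
+/// for full_index in unsafe { table.full_buckets_indices() } {
+///     let ptr = table.bucket_ptr(full_index, layout.size);
+///     // ... read, move or drop the element behind `ptr` ...
+/// }
+/// ```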
+pub(crate) struct FullBucketsIndices {
+    // Mask of full buckets in the current group. Bits are cleared from this
+    // mask as each element is processed.
+    current_group: BitMaskIter,
+
+    // Index of the first control byte of the current group (relative
+    // to the start of the control bytes).
+    group_first_index: usize,
+
+    // Pointer to the current group of control bytes.
+    // Must be aligned to the group size (Group::WIDTH).
+    ctrl: NonNull<u8>,
+
+    // Number of elements in the table.
+    items: usize,
+}
+
+impl FullBucketsIndices {
+    /// Advances the iterator and returns the next value.
+    ///
+    /// # Safety
+    ///
+    /// If any of the following conditions are violated, the result is
+    /// [`Undefined Behavior`]:
+    ///
+    /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
+    ///   i.e. the table outlives the `FullBucketsIndices`;
+    ///
+    /// * It never tries to iterate after getting all elements.
+    ///
+    /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+    #[inline(always)]
+    unsafe fn next_impl(&mut self) -> Option<usize> {
+        loop {
+            if let Some(index) = self.current_group.next() {
+                // The returned `self.group_first_index + index` will always
+                // be in the range `0..self.buckets()`. See explanation below.
+                return Some(self.group_first_index + index);
+            }
+
+            // SAFETY: The caller of this function ensures that:
+            //
+            // 1. It never tries to iterate after getting all the elements;
+            // 2. The table is alive and has not moved;
+            // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
+            //
+            // Taking the above into account, we always stay within the bounds, because:
+            //
+            // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
+            //    we will never end up in the given branch, since we should have already
+            //    yielded all the elements of the table.
+            //
+            // 2. For tables larger than the group width: the number of buckets is a
+            //    power of two (2 ^ n) and Group::WIDTH is also a power of two (2 ^ k).
+            //    Since `(2 ^ n) > (2 ^ k)`, it follows that `(2 ^ n) % (2 ^ k) = 0`.
+            //    As we start from the start of the array of control bytes, and never
+            //    try to iterate after getting all the elements, the last `self.ctrl`
+            //    will be equal to `self.buckets() - Group::WIDTH`, so
+            //    `self.current_group.next()` will always contain indices within the
+            //    range `0..Group::WIDTH`, and the subsequent
+            //    `self.group_first_index + index` will always be less than
+            //    `self.buckets()`.
+            self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
+
+            // SAFETY: See explanation above.
+            self.current_group = Group::load_aligned(self.ctrl.as_ptr())
+                .match_full()
+                .into_iter();
+            self.group_first_index += Group::WIDTH;
+        }
+    }
+}
+
+impl Iterator for FullBucketsIndices {
+    type Item = usize;
+
+    /// Advances the iterator and returns the next value. It is up to
+    /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
+    /// because we cannot make the `next` method unsafe.
+    #[inline(always)]
+    fn next(&mut self) -> Option<usize> {
+        // Return if we have already yielded all the items.
+        if self.items == 0 {
+            return None;
+        }
+
+        let nxt = unsafe {
+            // SAFETY:
+            // 1. We check the number of items to yield using the `items` field.
+            // 2. The caller ensures that the table is alive and has not moved.
+ self.next_impl() + }; + + debug_assert!(nxt.is_some()); + self.items -= 1; + + nxt + } + + #[inline(always)] + fn size_hint(&self) -> (usize, Option) { + (self.items, Some(self.items)) + } +} + +impl ExactSizeIterator for FullBucketsIndices {} +impl FusedIterator for FullBucketsIndices {} + /// Iterator which consumes a table and returns elements. -pub struct RawIntoIter { +pub struct RawIntoIter { iter: RawIter, - allocation: Option<(NonNull, Layout)>, + allocation: Option<(NonNull, Layout, A)>, marker: PhantomData, - alloc: A, } -impl RawIntoIter { +impl RawIntoIter { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter { self.iter.clone() } } -unsafe impl Send for RawIntoIter +unsafe impl Send for RawIntoIter where T: Send, A: Send, { } -unsafe impl Sync for RawIntoIter +unsafe impl Sync for RawIntoIter where T: Sync, A: Sync, @@ -2204,7 +4221,7 @@ where } #[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -2212,14 +4229,14 @@ unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { self.iter.drop_elements(); // Free the table - if let Some((ptr, layout)) = self.allocation { - self.alloc.deallocate(ptr, layout); + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); } } } } #[cfg(not(feature = "nightly"))] -impl Drop for RawIntoIter { +impl Drop for RawIntoIter { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -2227,14 +4244,14 @@ impl Drop for RawIntoIter { self.iter.drop_elements(); // Free the table - if let Some((ptr, layout)) = self.allocation { - self.alloc.deallocate(ptr, layout); + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); } } } } -impl Iterator for RawIntoIter { +impl Iterator for RawIntoIter { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -2248,45 +4265,45 @@ impl Iterator for RawIntoIter { } } -impl ExactSizeIterator for RawIntoIter {} -impl FusedIterator for RawIntoIter {} +impl ExactSizeIterator for RawIntoIter {} +impl FusedIterator for RawIntoIter {} /// Iterator which consumes elements without freeing the table storage. -pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { +pub struct RawDrain<'a, T, A: Allocator = Global> { iter: RawIter, // The table is moved into the iterator for the duration of the drain. This // ensures that an empty table is left if the drain iterator is leaked // without dropping. - table: ManuallyDrop>, - orig_table: NonNull>, + table: RawTableInner, + orig_table: NonNull, // We don't use a &'a mut RawTable because we want RawDrain to be // covariant over T. marker: PhantomData<&'a RawTable>, } -impl RawDrain<'_, T, A> { +impl RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter { self.iter.clone() } } -unsafe impl Send for RawDrain<'_, T, A> +unsafe impl Send for RawDrain<'_, T, A> where T: Send, A: Send, { } -unsafe impl Sync for RawDrain<'_, T, A> +unsafe impl Sync for RawDrain<'_, T, A> where T: Sync, A: Sync, { } -impl Drop for RawDrain<'_, T, A> { +impl Drop for RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -2300,12 +4317,12 @@ impl Drop for RawDrain<'_, T, A> { // Move the now empty table back to its original location. 
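             // (The drain took the inner table by value when it was created, so the
             // original location must be refilled; after the drain the moved table is
             // empty, leaving a valid, reusable table behind.)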
self.orig_table .as_ptr() - .copy_from_nonoverlapping(&*self.table, 1); + .copy_from_nonoverlapping(&self.table, 1); } } } -impl Iterator for RawDrain<'_, T, A> { +impl Iterator for RawDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -2322,21 +4339,36 @@ impl Iterator for RawDrain<'_, T, A> { } } -impl ExactSizeIterator for RawDrain<'_, T, A> {} -impl FusedIterator for RawDrain<'_, T, A> {} +impl ExactSizeIterator for RawDrain<'_, T, A> {} +impl FusedIterator for RawDrain<'_, T, A> {} /// Iterator over occupied buckets that could match a given hash. /// /// `RawTable` only stores 7 bits of the hash value, so this iterator may return /// items that have a hash value different than the one provided. You should /// always validate the returned values before using them. -pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> { - inner: RawIterHashInner<'a, A>, +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket. +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator. +/// - The order in which the iterator yields buckets is unspecified and may +/// change in the future. +pub struct RawIterHash { + inner: RawIterHashInner, _marker: PhantomData, } -struct RawIterHashInner<'a, A: Allocator + Clone> { - table: &'a RawTableInner, +struct RawIterHashInner { + // See `RawTableInner`'s corresponding fields for details. + // We can't store a `*const RawTableInner` as it would get + // invalidated by the user calling `&mut` methods on `RawTable`. + bucket_mask: usize, + ctrl: NonNull, // The top 7 bits of the hash. 
h2_hash: u8, @@ -2350,71 +4382,105 @@ struct RawIterHashInner<'a, A: Allocator + Clone> { bitmask: BitMaskIter, } -impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> { +impl RawIterHash { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - fn new(table: &'a RawTable, hash: u64) -> Self { + unsafe fn new(table: &RawTable, hash: u64) -> Self { RawIterHash { inner: RawIterHashInner::new(&table.table, hash), _marker: PhantomData, } } } -impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> { +impl RawIterHashInner { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - fn new(table: &'a RawTableInner, hash: u64) -> Self { - unsafe { - let h2_hash = h2(hash); - let probe_seq = table.probe_seq(hash); - let group = Group::load(table.ctrl(probe_seq.pos)); - let bitmask = group.match_byte(h2_hash).into_iter(); - - RawIterHashInner { - table, - h2_hash, - probe_seq, - group, - bitmask, - } + unsafe fn new(table: &RawTableInner, hash: u64) -> Self { + let h2_hash = h2(hash); + let probe_seq = table.probe_seq(hash); + let group = Group::load(table.ctrl(probe_seq.pos)); + let bitmask = group.match_byte(h2_hash).into_iter(); + + RawIterHashInner { + bucket_mask: table.bucket_mask, + ctrl: table.ctrl, + h2_hash, + probe_seq, + group, + bitmask, } } } -impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> { +impl Iterator for RawIterHash { type Item = Bucket; fn next(&mut self) -> Option> { unsafe { match self.inner.next() { - Some(index) => Some(self.inner.table.bucket(index)), + Some(index) => { + // Can't use `RawTable::bucket` here as we don't have + // an actual `RawTable` reference to use. + debug_assert!(index <= self.inner.bucket_mask); + let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index); + Some(bucket) + } None => None, } } } } -impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> { +impl Iterator for RawIterHashInner { type Item = usize; fn next(&mut self) -> Option { unsafe { loop { if let Some(bit) = self.bitmask.next() { - let index = (self.probe_seq.pos + bit) & self.table.bucket_mask; + let index = (self.probe_seq.pos + bit) & self.bucket_mask; return Some(index); } if likely(self.group.match_empty().any_bit_set()) { return None; } - self.probe_seq.move_next(self.table.bucket_mask); - self.group = Group::load(self.table.ctrl(self.probe_seq.pos)); + self.probe_seq.move_next(self.bucket_mask); + + // Can't use `RawTableInner::ctrl` here as we don't have + // an actual `RawTableInner` reference to use. 
+ let index = self.probe_seq.pos; + debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH); + let group_ctrl = self.ctrl.as_ptr().add(index); + + self.group = Group::load(group_ctrl); self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); } } } } +pub(crate) struct RawExtractIf<'a, T, A: Allocator> { + pub iter: RawIter, + pub table: &'a mut RawTable, +} + +impl RawExtractIf<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn next(&mut self, mut f: F) -> Option + where + F: FnMut(&mut T) -> bool, + { + unsafe { + for item in &mut self.iter { + if f(item.as_mut()) { + return Some(self.table.remove(item).0); + } + } + } + None + } +} + #[cfg(test)] mod test_map { use super::*; @@ -2457,4 +4523,214 @@ mod test_map { assert!(table.find(i + 100, |x| *x == i + 100).is_none()); } } + + /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF + /// AN UNINITIALIZED TABLE DURING THE DROP + #[test] + fn test_drop_uninitialized() { + use ::alloc::vec::Vec; + + let table = unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap() + }; + drop(table); + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. + #[test] + fn test_drop_zero_items() { + use ::alloc::vec::Vec; + unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + let table = + RawTable::<(u64, Vec)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap(); + + // WE SIMULATE, AS IT WERE, A FULL TABLE. + + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. + table + .table + .ctrl(0) + .write_bytes(EMPTY, table.table.num_ctrl_bytes()); + + // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets() + table.table.ctrl(0).write_bytes(0, table.capacity()); + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. + if table.buckets() < Group::WIDTH { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets()); + } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH); + } + drop(table); + } + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. 
+ #[test] + fn test_catch_panic_clone_from() { + use ::alloc::sync::Arc; + use ::alloc::vec::Vec; + use allocator_api2::alloc::{AllocError, Allocator, Global}; + use core::sync::atomic::{AtomicI8, Ordering}; + use std::thread; + + struct MyAllocInner { + drop_count: Arc, + } + + #[derive(Clone)] + struct MyAlloc { + _inner: Arc, + } + + impl Drop for MyAllocInner { + fn drop(&mut self) { + println!("MyAlloc freed."); + self.drop_count.fetch_sub(1, Ordering::SeqCst); + } + } + + unsafe impl Allocator for MyAlloc { + fn allocate(&self, layout: Layout) -> std::result::Result, AllocError> { + let g = Global; + g.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + let g = Global; + g.deallocate(ptr, layout) + } + } + + const DISARMED: bool = false; + const ARMED: bool = true; + + struct CheckedCloneDrop { + panic_in_clone: bool, + dropped: bool, + need_drop: Vec, + } + + impl Clone for CheckedCloneDrop { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + dropped: self.dropped, + need_drop: self.need_drop.clone(), + } + } + } + + impl Drop for CheckedCloneDrop { + fn drop(&mut self) { + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + + let dropped: Arc = Arc::new(AtomicI8::new(2)); + + let mut table = RawTable::new_in(MyAlloc { + _inner: Arc::new(MyAllocInner { + drop_count: dropped.clone(), + }), + }); + + for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() { + let idx = idx as u64; + table.insert( + idx, + ( + idx, + CheckedCloneDrop { + panic_in_clone, + dropped: false, + need_drop: vec![idx], + }, + ), + |(k, _)| *k, + ); + } + + assert_eq!(table.len(), 7); + + thread::scope(|s| { + let result = s.spawn(|| { + let armed_flags = [ + DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + let mut scope_table = RawTable::new_in(MyAlloc { + _inner: Arc::new(MyAllocInner { + drop_count: dropped.clone(), + }), + }); + for (idx, &panic_in_clone) in armed_flags.iter().enumerate() { + let idx = idx as u64; + scope_table.insert( + idx, + ( + idx, + CheckedCloneDrop { + panic_in_clone, + dropped: false, + need_drop: vec![idx + 100], + }, + ), + |(k, _)| *k, + ); + } + table.clone_from(&scope_table); + }); + assert!(result.join().is_err()); + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. + assert_eq!(table.len(), 0); + assert_eq!(unsafe { table.iter().count() }, 0); + assert_eq!(unsafe { table.iter().iter.count() }, 0); + + for idx in 0..table.buckets() { + let idx = idx as u64; + assert!( + table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + + // All allocator clones should already be dropped. 
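+        // (The counter starts at 2; only `scope_table`'s allocator inner has been
+        // dropped at this point, while `table` itself is still alive, so 1 remains.)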
+ assert_eq!(dropped.load(Ordering::SeqCst), 1); + } } diff --git a/vendor/hashbrown/src/raw/neon.rs b/vendor/hashbrown/src/raw/neon.rs new file mode 100644 index 0000000..44e82d5 --- /dev/null +++ b/vendor/hashbrown/src/raw/neon.rs @@ -0,0 +1,124 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::arch::aarch64 as neon; +use core::mem; +use core::num::NonZeroU64; + +pub(crate) type BitMaskWord = u64; +pub(crate) type NonZeroBitMaskWord = NonZeroU64; +pub(crate) const BITMASK_STRIDE: usize = 8; +pub(crate) const BITMASK_MASK: BitMaskWord = !0; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080; + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a 64-bit NEON value. +#[derive(Copy, Clone)] +pub(crate) struct Group(neon::uint8x8_t); + +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub(crate) const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. + /// + /// This is guaranteed to be aligned to the group size. + #[inline] + pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { + #[repr(C)] + struct AlignedBytes { + _align: [Group; 0], + bytes: [u8; Group::WIDTH], + } + const ALIGNED_BYTES: AlignedBytes = AlignedBytes { + _align: [], + bytes: [EMPTY; Group::WIDTH], + }; + &ALIGNED_BYTES.bytes + } + + /// Loads a group of bytes starting at the given address. + #[inline] + #[allow(clippy::cast_ptr_alignment)] // unaligned load + pub(crate) unsafe fn load(ptr: *const u8) -> Self { + Group(neon::vld1_u8(ptr)) + } + + /// Loads a group of bytes starting at the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + Group(neon::vld1_u8(ptr)) + } + + /// Stores the group of bytes to the given address, which must be + /// aligned to `mem::align_of::()`. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + neon::vst1_u8(ptr, self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which *may* + /// have the given value. + #[inline] + pub(crate) fn match_byte(self, byte: u8) -> BitMask { + unsafe { + let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(byte)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. + #[inline] + pub(crate) fn match_empty(self) -> BitMask { + self.match_byte(EMPTY) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub(crate) fn match_empty_or_deleted(self) -> BitMask { + unsafe { + let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. 
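+    ///
+    /// (`FULL` control bytes have their high bit clear, so reinterpreting the
+    /// group as signed bytes and keeping the non-negative lanes (`vcgez_s8`)
+    /// selects exactly the full slots.)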
+ #[inline] + pub(crate) fn match_full(self) -> BitMask { + unsafe { + let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0)); + BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0)) + } + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + unsafe { + let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0)); + Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80))) + } + } +} diff --git a/vendor/hashbrown/src/raw/sse2.rs b/vendor/hashbrown/src/raw/sse2.rs index a0bf6da..956ba5d 100644 --- a/vendor/hashbrown/src/raw/sse2.rs +++ b/vendor/hashbrown/src/raw/sse2.rs @@ -1,28 +1,31 @@ use super::bitmask::BitMask; use super::EMPTY; use core::mem; +use core::num::NonZeroU16; #[cfg(target_arch = "x86")] use core::arch::x86; #[cfg(target_arch = "x86_64")] use core::arch::x86_64 as x86; -pub type BitMaskWord = u16; -pub const BITMASK_STRIDE: usize = 1; -pub const BITMASK_MASK: BitMaskWord = 0xffff; +pub(crate) type BitMaskWord = u16; +pub(crate) type NonZeroBitMaskWord = NonZeroU16; +pub(crate) const BITMASK_STRIDE: usize = 1; +pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff; +pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0; /// Abstraction over a group of control bytes which can be scanned in /// parallel. /// /// This implementation uses a 128-bit SSE value. #[derive(Copy, Clone)] -pub struct Group(x86::__m128i); +pub(crate) struct Group(x86::__m128i); // FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 #[allow(clippy::use_self)] impl Group { /// Number of bytes in the group. - pub const WIDTH: usize = mem::size_of::(); + pub(crate) const WIDTH: usize = mem::size_of::(); /// Returns a full group of empty bytes, suitable for use as the initial /// value for an empty hash table. @@ -30,7 +33,7 @@ impl Group { /// This is guaranteed to be aligned to the group size. #[inline] #[allow(clippy::items_after_statements)] - pub const fn static_empty() -> &'static [u8; Group::WIDTH] { + pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] { #[repr(C)] struct AlignedBytes { _align: [Group; 0], @@ -46,7 +49,7 @@ impl Group { /// Loads a group of bytes starting at the given address. #[inline] #[allow(clippy::cast_ptr_alignment)] // unaligned load - pub unsafe fn load(ptr: *const u8) -> Self { + pub(crate) unsafe fn load(ptr: *const u8) -> Self { Group(x86::_mm_loadu_si128(ptr.cast())) } @@ -54,7 +57,7 @@ impl Group { /// aligned to `mem::align_of::()`. #[inline] #[allow(clippy::cast_ptr_alignment)] - pub unsafe fn load_aligned(ptr: *const u8) -> Self { + pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self { // FIXME: use align_offset once it stabilizes debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); Group(x86::_mm_load_si128(ptr.cast())) @@ -64,7 +67,7 @@ impl Group { /// aligned to `mem::align_of::()`. 
#[inline] #[allow(clippy::cast_ptr_alignment)] - pub unsafe fn store_aligned(self, ptr: *mut u8) { + pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) { // FIXME: use align_offset once it stabilizes debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); x86::_mm_store_si128(ptr.cast(), self.0); @@ -73,7 +76,7 @@ impl Group { /// Returns a `BitMask` indicating all bytes in the group which have /// the given value. #[inline] - pub fn match_byte(self, byte: u8) -> BitMask { + pub(crate) fn match_byte(self, byte: u8) -> BitMask { #[allow( clippy::cast_possible_wrap, // byte: u8 as i8 // byte: i32 as u16 @@ -91,14 +94,14 @@ impl Group { /// Returns a `BitMask` indicating all bytes in the group which are /// `EMPTY`. #[inline] - pub fn match_empty(self) -> BitMask { + pub(crate) fn match_empty(self) -> BitMask { self.match_byte(EMPTY) } /// Returns a `BitMask` indicating all bytes in the group which are /// `EMPTY` or `DELETED`. #[inline] - pub fn match_empty_or_deleted(self) -> BitMask { + pub(crate) fn match_empty_or_deleted(self) -> BitMask { #[allow( // byte: i32 as u16 // note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the @@ -114,7 +117,7 @@ impl Group { /// Returns a `BitMask` indicating all bytes in the group which are full. #[inline] - pub fn match_full(&self) -> BitMask { + pub(crate) fn match_full(&self) -> BitMask { self.match_empty_or_deleted().invert() } @@ -123,7 +126,7 @@ impl Group { /// - `DELETED => EMPTY` /// - `FULL => DELETED` #[inline] - pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self { // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 // and high_bit = 0 (FULL) to 1000_0000 // diff --git a/vendor/hashbrown/src/rustc_entry.rs b/vendor/hashbrown/src/rustc_entry.rs index 2e84595..defbd4b 100644 --- a/vendor/hashbrown/src/rustc_entry.rs +++ b/vendor/hashbrown/src/rustc_entry.rs @@ -1,5 +1,5 @@ use self::RustcEntry::*; -use crate::map::{make_insert_hash, Drain, HashMap, IntoIter, Iter, IterMut}; +use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut}; use crate::raw::{Allocator, Bucket, Global, RawTable}; use core::fmt::{self, Debug}; use core::hash::{BuildHasher, Hash}; @@ -9,7 +9,7 @@ impl HashMap where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Gets the given key's corresponding entry in the map for in-place manipulation. /// @@ -32,7 +32,7 @@ where /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> { - let hash = make_insert_hash(&self.hash_builder, &key); + let hash = make_hash(&self.hash_builder, &key); if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { RustcEntry::Occupied(RustcOccupiedEntry { key: Some(key), @@ -62,7 +62,7 @@ where /// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry pub enum RustcEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
Occupied(RustcOccupiedEntry<'a, K, V, A>), @@ -71,7 +71,7 @@ where Vacant(RustcVacantEntry<'a, K, V, A>), } -impl Debug for RustcEntry<'_, K, V, A> { +impl Debug for RustcEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -86,7 +86,7 @@ impl Debug for RustcEntry<'_, K, V, A> /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcOccupiedEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { key: Option, elem: Bucket<(K, V)>, @@ -97,18 +97,18 @@ unsafe impl Send for RustcOccupiedEntry<'_, K, V, A> where K: Send, V: Send, - A: Allocator + Clone + Send, + A: Allocator + Send, { } unsafe impl Sync for RustcOccupiedEntry<'_, K, V, A> where K: Sync, V: Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { } -impl Debug for RustcOccupiedEntry<'_, K, V, A> { +impl Debug for RustcOccupiedEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -123,20 +123,20 @@ impl Debug for RustcOccupiedEntry<'_, /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcVacantEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { hash: u64, key: K, table: &'a mut RawTable<(K, V), A>, } -impl Debug for RustcVacantEntry<'_, K, V, A> { +impl Debug for RustcVacantEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } } -impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> { /// Sets the value of the entry, and returns a RustcOccupiedEntry. /// /// # Examples @@ -265,7 +265,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { } } -impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { +impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -293,7 +293,7 @@ impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { } } -impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -330,7 +330,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { /// ``` #[cfg_attr(feature = "inline-more", inline)] pub fn remove_entry(self) -> (K, V) { - unsafe { self.table.remove(self.elem) } + unsafe { self.table.remove(self.elem).0 } } /// Gets a reference to the value in the entry. @@ -518,7 +518,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { } } -impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `RustcVacantEntry`. 
/// diff --git a/vendor/hashbrown/src/scopeguard.rs b/vendor/hashbrown/src/scopeguard.rs index f85e6ab..382d060 100644 --- a/vendor/hashbrown/src/scopeguard.rs +++ b/vendor/hashbrown/src/scopeguard.rs @@ -1,6 +1,6 @@ // Extracted from the scopeguard crate use core::{ - mem, + mem::ManuallyDrop, ops::{Deref, DerefMut}, ptr, }; @@ -28,15 +28,13 @@ where #[inline] pub fn into_inner(guard: Self) -> T { // Cannot move out of Drop-implementing types, so - // ptr::read the value and forget the guard. + // ptr::read the value out of a ManuallyDrop + // Don't use mem::forget as that might invalidate value + let guard = ManuallyDrop::new(guard); unsafe { let value = ptr::read(&guard.value); - // read the closure so that it is dropped, and assign it to a local - // variable to ensure that it is only dropped after the guard has - // been forgotten. (In case the Drop impl of the closure, or that - // of any consumed captured variable, panics). - let _dropfn = ptr::read(&guard.dropfn); - mem::forget(guard); + // read the closure so that it is dropped + let _ = ptr::read(&guard.dropfn); value } } diff --git a/vendor/hashbrown/src/set.rs b/vendor/hashbrown/src/set.rs index 2a4dcea..09b45fd 100644 --- a/vendor/hashbrown/src/set.rs +++ b/vendor/hashbrown/src/set.rs @@ -1,14 +1,14 @@ -use crate::TryReserveError; +#[cfg(feature = "raw")] +use crate::raw::RawTable; +use crate::{Equivalent, TryReserveError}; use alloc::borrow::ToOwned; -use core::borrow::Borrow; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::iter::{Chain, FromIterator, FusedIterator}; -use core::mem; use core::ops::{BitAnd, BitOr, BitXor, Sub}; -use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys}; -use crate::raw::{Allocator, Global}; +use super::map::{self, DefaultHashBuilder, HashMap, Keys}; +use crate::raw::{Allocator, Global, RawExtractIf}; // Future Optimization (FIXME!) // ============================= @@ -112,7 +112,7 @@ use crate::raw::{Allocator, Global}; /// [`HashMap`]: struct.HashMap.html /// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html /// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html -pub struct HashSet { +pub struct HashSet { pub(crate) map: HashMap, } @@ -135,6 +135,18 @@ impl HashSet { /// The hash set is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_hasher`](HashSet::with_hasher) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// /// # Examples /// /// ``` @@ -153,6 +165,18 @@ impl HashSet { /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. 
+ /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_capacity_and_hasher`](HashSet::with_capacity_and_hasher) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// /// # Examples /// /// ``` @@ -169,12 +193,24 @@ impl HashSet { } #[cfg(feature = "ahash")] -impl HashSet { +impl HashSet { /// Creates an empty `HashSet`. /// /// The hash set is initially created with a capacity of 0, so it will not allocate until it /// is first inserted into. /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_hasher_in`](HashSet::with_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// /// # Examples /// /// ``` @@ -193,6 +229,18 @@ impl HashSet { /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`], for example with + /// [`with_capacity_and_hasher_in`](HashSet::with_capacity_and_hasher_in) method. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// /// # Examples /// /// ``` @@ -208,7 +256,7 @@ impl HashSet { } } -impl HashSet { +impl HashSet { /// Returns the number of elements the set can hold without reallocating. /// /// # Examples @@ -331,8 +379,11 @@ impl HashSet { /// In other words, move all elements `e` such that `f(&e)` returns `true` out /// into another iterator. /// - /// When the returned DrainedFilter is dropped, any remaining elements that satisfy - /// the predicate are dropped from the set. + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. 
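+    /// For example, `set.retain(|&v| v % 2 != 0);` is a sketch of the negated
+    /// form of the predicate used in the example below.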
+ /// + /// [`retain()`]: HashSet::retain /// /// # Examples /// @@ -340,7 +391,7 @@ impl HashSet { /// use hashbrown::HashSet; /// /// let mut set: HashSet = (0..8).collect(); - /// let drained: HashSet = set.drain_filter(|v| v % 2 == 0).collect(); + /// let drained: HashSet = set.extract_if(|v| v % 2 == 0).collect(); /// /// let mut evens = drained.into_iter().collect::>(); /// let mut odds = set.into_iter().collect::>(); @@ -351,13 +402,13 @@ impl HashSet { /// assert_eq!(odds, vec![1, 3, 5, 7]); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, T, F, A> + pub fn extract_if(&mut self, f: F) -> ExtractIf<'_, T, F, A> where F: FnMut(&T) -> bool, { - DrainFilter { + ExtractIf { f, - inner: DrainFilterInner { + inner: RawExtractIf { iter: unsafe { self.map.table.iter() }, table: &mut self.map.table, }, @@ -386,16 +437,23 @@ impl HashSet { /// Creates a new empty hash set which will use the given hasher to hash /// keys. /// - /// The hash set is also created with the default initial capacity. + /// The hash set is initially created with a capacity of 0, so it will not + /// allocate until it is first inserted into. + /// + /// # HashDoS resistance /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. /// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashMap to be useful, see its documentation for details. + /// the HashSet to be useful, see its documentation for details. /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html /// /// # Examples /// @@ -407,8 +465,6 @@ impl HashSet { /// let mut set = HashSet::with_hasher(s); /// set.insert(2); /// ``` - /// - /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub const fn with_hasher(hasher: S) -> Self { Self { @@ -422,13 +478,20 @@ impl HashSet { /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. 
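+    /// For example (a sketch, assuming the optional `ahash` dependency is
+    /// available):
+    ///
+    /// ```
+    /// use ahash::RandomState;
+    /// use hashbrown::HashSet;
+    ///
+    /// // A randomly seeded state, unlike the fixed-key default.
+    /// let mut set: HashSet<i32, RandomState> = HashSet::with_hasher(RandomState::new());
+    /// set.insert(2);
+    /// ```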
/// /// The `hash_builder` passed should implement the [`BuildHasher`] trait for - /// the HashMap to be useful, see its documentation for details. + /// the HashSet to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html /// /// # Examples /// @@ -440,8 +503,6 @@ impl HashSet { /// let mut set = HashSet::with_capacity_and_hasher(10, s); /// set.insert(1); /// ``` - /// - /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html #[cfg_attr(feature = "inline-more", inline)] pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { Self { @@ -452,7 +513,7 @@ impl HashSet { impl HashSet where - A: Allocator + Clone, + A: Allocator, { /// Returns a reference to the underlying allocator. #[inline] @@ -463,12 +524,23 @@ where /// Creates a new empty hash set which will use the given hasher to hash /// keys. /// - /// The hash set is also created with the default initial capacity. + /// The hash set is initially created with a capacity of 0, so it will not + /// allocate until it is first inserted into. + /// + /// # HashDoS resistance /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashSet to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html /// /// # Examples /// @@ -481,7 +553,7 @@ where /// set.insert(2); /// ``` #[cfg_attr(feature = "inline-more", inline)] - pub fn with_hasher_in(hasher: S, alloc: A) -> Self { + pub const fn with_hasher_in(hasher: S, alloc: A) -> Self { Self { map: HashMap::with_hasher_in(hasher, alloc), } @@ -493,10 +565,20 @@ where /// The hash set will be able to hold at least `capacity` elements without /// reallocating. If `capacity` is 0, the hash set will not allocate. /// - /// Warning: `hasher` is normally randomly generated, and - /// is designed to allow `HashSet`s to be resistant to attacks that - /// cause many collisions and very poor performance. Setting it - /// manually using this function can expose a DoS attack vector. + /// # HashDoS resistance + /// + /// The `hash_builder` normally use a fixed key by default and that does + /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`]. + /// Users who require HashDoS resistance should explicitly use + /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`] + /// as the hasher when creating a [`HashSet`]. 
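+    /// For example (a sketch using the standard library's randomly seeded
+    /// hasher state):
+    ///
+    /// ```
+    /// use std::collections::hash_map::RandomState;
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<i32, RandomState> =
+    ///     HashSet::with_capacity_and_hasher(10, RandomState::new());
+    /// set.insert(1);
+    /// ```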
+ /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashSet to be useful, see its documentation for details. + /// + /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack + /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html /// /// # Examples /// @@ -539,7 +621,7 @@ impl HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashSet`. The collection may reserve more space to avoid @@ -547,7 +629,12 @@ where /// /// # Panics /// - /// Panics if the new allocation size overflows `usize`. + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashSet::try_reserve) instead + /// if you want to handle memory allocation failure. + /// + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html /// /// # Examples /// @@ -773,8 +860,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn contains(&self, value: &Q) -> bool where - T: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.map.contains_key(value) } @@ -800,8 +886,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn get(&self, value: &Q) -> Option<&T> where - T: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. match self.map.get_key_value(value) { @@ -856,8 +941,7 @@ where #[inline] pub fn get_or_insert_owned(&mut self, value: &Q) -> &T where - T: Borrow, - Q: Hash + Eq + ToOwned, + Q: Hash + Equivalent + ToOwned, { // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`. @@ -889,8 +973,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn get_or_insert_with(&mut self, value: &Q, f: F) -> &T where - T: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, F: FnOnce(&Q) -> T, { // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with @@ -1106,8 +1189,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn remove(&mut self, value: &Q) -> bool where - T: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { self.map.remove(value).is_some() } @@ -1133,8 +1215,7 @@ where #[cfg_attr(feature = "inline-more", inline)] pub fn take(&mut self, value: &Q) -> Option where - T: Borrow, - Q: Hash + Eq, + Q: Hash + Equivalent, { // Avoid `Option::map` because it bloats LLVM IR. match self.map.remove_entry(value) { @@ -1144,11 +1225,53 @@ where } } +impl HashSet { + /// Returns a reference to the [`RawTable`] used underneath [`HashSet`]. + /// This function is only available if the `raw` feature of the crate is enabled. + /// + /// # Note + /// + /// Calling this function is safe, but using the raw hash table API may require + /// unsafe functions or blocks. + /// + /// `RawTable` API gives the lowest level of control under the set that can be useful + /// for extending the HashSet's API, but may lead to *[undefined behavior]*. 
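+    ///
+    /// A minimal sketch of reading through the raw table (assuming the `raw`
+    /// crate feature is enabled; marked `ignore` because the API is
+    /// feature-gated):
+    ///
+    /// ```ignore
+    /// let set: hashbrown::HashSet<i32> = (0..3).collect();
+    /// // The set's elements are stored in the raw table as `(T, ())` pairs.
+    /// assert_eq!(set.raw_table().len(), 3);
+    /// ```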
+ /// + /// [`HashSet`]: struct.HashSet.html + /// [`RawTable`]: crate::raw::RawTable + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_table(&self) -> &RawTable<(T, ()), A> { + self.map.raw_table() + } + + /// Returns a mutable reference to the [`RawTable`] used underneath [`HashSet`]. + /// This function is only available if the `raw` feature of the crate is enabled. + /// + /// # Note + /// + /// Calling this function is safe, but using the raw hash table API may require + /// unsafe functions or blocks. + /// + /// `RawTable` API gives the lowest level of control under the set that can be useful + /// for extending the HashSet's API, but may lead to *[undefined behavior]*. + /// + /// [`HashSet`]: struct.HashSet.html + /// [`RawTable`]: crate::raw::RawTable + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_table_mut(&mut self) -> &mut RawTable<(T, ()), A> { + self.map.raw_table_mut() + } +} + impl PartialEq for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -1163,14 +1286,14 @@ impl Eq for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } impl fmt::Debug for HashSet where T: fmt::Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() @@ -1179,7 +1302,7 @@ where impl From> for HashSet where - A: Allocator + Clone, + A: Allocator, { fn from(map: HashMap) -> Self { Self { map } @@ -1190,7 +1313,7 @@ impl FromIterator for HashSet where T: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter>(iter: I) -> Self { @@ -1205,7 +1328,7 @@ where impl From<[T; N]> for HashSet where T: Eq + Hash, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// # Examples /// @@ -1225,7 +1348,7 @@ impl Extend for HashSet where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn extend>(&mut self, iter: I) { @@ -1249,7 +1372,7 @@ impl<'a, T, S, A> Extend<&'a T> for HashSet where T: 'a + Eq + Hash + Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn extend>(&mut self, iter: I) { @@ -1272,7 +1395,7 @@ where impl Default for HashSet where S: Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// Creates an empty `HashSet` with the `Default` value for the hasher. 
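+    ///
+    /// For example (a sketch, assuming the default hasher feature is enabled
+    /// so the hasher type has a `Default`):
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let set: HashSet<i32> = HashSet::default();
+    /// assert!(set.is_empty());
+    /// ```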
#[cfg_attr(feature = "inline-more", inline)] @@ -1287,7 +1410,7 @@ impl BitOr<&HashSet> for &HashSet where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Output = HashSet; @@ -1320,7 +1443,7 @@ impl BitAnd<&HashSet> for &HashSet where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Output = HashSet; @@ -1431,7 +1554,7 @@ pub struct Iter<'a, K> { /// /// [`HashSet`]: struct.HashSet.html /// [`into_iter`]: struct.HashSet.html#method.into_iter -pub struct IntoIter { +pub struct IntoIter { iter: map::IntoIter, } @@ -1442,23 +1565,24 @@ pub struct IntoIter { /// /// [`HashSet`]: struct.HashSet.html /// [`drain`]: struct.HashSet.html#method.drain -pub struct Drain<'a, K, A: Allocator + Clone = Global> { +pub struct Drain<'a, K, A: Allocator = Global> { iter: map::Drain<'a, K, (), A>, } /// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`. /// -/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its +/// This `struct` is created by the [`extract_if`] method on [`HashSet`]. See its /// documentation for more. /// -/// [`drain_filter`]: struct.HashSet.html#method.drain_filter +/// [`extract_if`]: struct.HashSet.html#method.extract_if /// [`HashSet`]: struct.HashSet.html -pub struct DrainFilter<'a, K, F, A: Allocator + Clone = Global> +#[must_use = "Iterators are lazy unless consumed"] +pub struct ExtractIf<'a, K, F, A: Allocator = Global> where F: FnMut(&K) -> bool, { f: F, - inner: DrainFilterInner<'a, K, (), A>, + inner: RawExtractIf<'a, (K, ()), A>, } /// A lazy iterator producing elements in the intersection of `HashSet`s. @@ -1468,7 +1592,7 @@ where /// /// [`HashSet`]: struct.HashSet.html /// [`intersection`]: struct.HashSet.html#method.intersection -pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { +pub struct Intersection<'a, T, S, A: Allocator = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1482,7 +1606,7 @@ pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`difference`]: struct.HashSet.html#method.difference -pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { +pub struct Difference<'a, T, S, A: Allocator = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1496,7 +1620,7 @@ pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct SymmetricDifference<'a, T, S, A: Allocator = Global> { iter: Chain, Difference<'a, T, S, A>>, } @@ -1507,11 +1631,11 @@ pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`union`]: struct.HashSet.html#method.union -pub struct Union<'a, T, S, A: Allocator + Clone = Global> { +pub struct Union<'a, T, S, A: Allocator = Global> { iter: Chain, Difference<'a, T, S, A>>, } -impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet { +impl<'a, T, S, A: Allocator> IntoIterator for &'a HashSet { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -1521,7 +1645,7 @@ impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet { } } -impl IntoIterator for HashSet { +impl IntoIterator for HashSet { type Item = T; type IntoIter = IntoIter; @@ -1587,7 +1711,7 @@ impl fmt::Debug 
for Iter<'_, K> { } } -impl Iterator for IntoIter { +impl Iterator for IntoIter { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1603,22 +1727,22 @@ impl Iterator for IntoIter { self.iter.size_hint() } } -impl ExactSizeIterator for IntoIter { +impl ExactSizeIterator for IntoIter { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl FusedIterator for IntoIter {} +impl FusedIterator for IntoIter {} -impl fmt::Debug for IntoIter { +impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl Iterator for Drain<'_, K, A> { +impl Iterator for Drain<'_, K, A> { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1634,36 +1758,22 @@ impl Iterator for Drain<'_, K, A> { self.iter.size_hint() } } -impl ExactSizeIterator for Drain<'_, K, A> { +impl ExactSizeIterator for Drain<'_, K, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl FusedIterator for Drain<'_, K, A> {} +impl FusedIterator for Drain<'_, K, A> {} -impl fmt::Debug for Drain<'_, K, A> { +impl fmt::Debug for Drain<'_, K, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl<'a, K, F, A: Allocator + Clone> Drop for DrainFilter<'a, K, F, A> -where - F: FnMut(&K) -> bool, -{ - #[cfg_attr(feature = "inline-more", inline)] - fn drop(&mut self) { - while let Some(item) = self.next() { - let guard = ConsumeAllOnDrop(self); - drop(item); - mem::forget(guard); - } - } -} - -impl Iterator for DrainFilter<'_, K, F, A> +impl Iterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool, { @@ -1671,9 +1781,9 @@ where #[cfg_attr(feature = "inline-more", inline)] fn next(&mut self) -> Option { - let f = &mut self.f; - let (k, _) = self.inner.next(&mut |k, _| f(k))?; - Some(k) + self.inner + .next(|&mut (ref k, ())| (self.f)(k)) + .map(|(k, ())| k) } #[inline] @@ -1682,12 +1792,9 @@ where } } -impl FusedIterator for DrainFilter<'_, K, F, A> where - F: FnMut(&K) -> bool -{ -} +impl FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} -impl Clone for Intersection<'_, T, S, A> { +impl Clone for Intersection<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Intersection { @@ -1701,7 +1808,7 @@ impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1726,7 +1833,7 @@ impl fmt::Debug for Intersection<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1737,11 +1844,11 @@ impl FusedIterator for Intersection<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } -impl Clone for Difference<'_, T, S, A> { +impl Clone for Difference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Difference { @@ -1755,7 +1862,7 @@ impl<'a, T, S, A> Iterator for Difference<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1780,7 +1887,7 @@ impl FusedIterator for Difference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, 
{ } @@ -1788,14 +1895,14 @@ impl fmt::Debug for Difference<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl Clone for SymmetricDifference<'_, T, S, A> { +impl Clone for SymmetricDifference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { SymmetricDifference { @@ -1808,7 +1915,7 @@ impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1826,7 +1933,7 @@ impl FusedIterator for SymmetricDifference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1834,14 +1941,14 @@ impl fmt::Debug for SymmetricDifference<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl Clone for Union<'_, T, S, A> { +impl Clone for Union<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Union { @@ -1854,7 +1961,7 @@ impl FusedIterator for Union<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1862,7 +1969,7 @@ impl fmt::Debug for Union<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1873,7 +1980,7 @@ impl<'a, T, S, A> Iterator for Union<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1925,7 +2032,7 @@ where /// ``` pub enum Entry<'a, T, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. /// @@ -1958,7 +2065,7 @@ where Vacant(VacantEntry<'a, T, S, A>), } -impl fmt::Debug for Entry<'_, T, S, A> { +impl fmt::Debug for Entry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -2003,11 +2110,11 @@ impl fmt::Debug for Entry<'_, T, S, A> { /// assert_eq!(set.get(&"c"), None); /// assert_eq!(set.len(), 2); /// ``` -pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntry<'a, T, S, A: Allocator = Global> { inner: map::OccupiedEntry<'a, T, (), S, A>, } -impl fmt::Debug for OccupiedEntry<'_, T, S, A> { +impl fmt::Debug for OccupiedEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("value", self.get()) @@ -2041,17 +2148,17 @@ impl fmt::Debug for OccupiedEntry<'_, T, /// } /// assert!(set.contains("b") && set.len() == 2); /// ``` -pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> { +pub struct VacantEntry<'a, T, S, A: Allocator = Global> { inner: map::VacantEntry<'a, T, (), S, A>, } -impl fmt::Debug for VacantEntry<'_, T, S, A> { +impl fmt::Debug for VacantEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.get()).finish() } } -impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { +impl<'a, T, S, A: Allocator> Entry<'a, T, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. 
/// /// # Examples @@ -2128,7 +2235,7 @@ impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { } } -impl OccupiedEntry<'_, T, S, A> { +impl OccupiedEntry<'_, T, S, A> { /// Gets a reference to the value in the entry. /// /// # Examples @@ -2215,7 +2322,7 @@ impl OccupiedEntry<'_, T, S, A> { } } -impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> { +impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> { /// Gets a reference to the value that would be used when inserting /// through the `VacantEntry`. /// @@ -2295,34 +2402,30 @@ fn assert_covariance() { fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { v } - fn into_iter<'new, A: Allocator + Clone>( - v: IntoIter<&'static str, A>, - ) -> IntoIter<&'new str, A> { + fn into_iter<'new, A: Allocator>(v: IntoIter<&'static str, A>) -> IntoIter<&'new str, A> { v } - fn difference<'a, 'new, A: Allocator + Clone>( + fn difference<'a, 'new, A: Allocator>( v: Difference<'a, &'static str, DefaultHashBuilder, A>, ) -> Difference<'a, &'new str, DefaultHashBuilder, A> { v } - fn symmetric_difference<'a, 'new, A: Allocator + Clone>( + fn symmetric_difference<'a, 'new, A: Allocator>( v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>, ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> { v } - fn intersection<'a, 'new, A: Allocator + Clone>( + fn intersection<'a, 'new, A: Allocator>( v: Intersection<'a, &'static str, DefaultHashBuilder, A>, ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> { v } - fn union<'a, 'new, A: Allocator + Clone>( + fn union<'a, 'new, A: Allocator>( v: Union<'a, &'static str, DefaultHashBuilder, A>, ) -> Union<'a, &'new str, DefaultHashBuilder, A> { v } - fn drain<'new, A: Allocator + Clone>( - d: Drain<'static, &'static str, A>, - ) -> Drain<'new, &'new str, A> { + fn drain<'new, A: Allocator>(d: Drain<'static, &'static str, A>) -> Drain<'new, &'new str, A> { d } } @@ -2613,10 +2716,10 @@ mod test_set { set.insert(1); set.insert(2); - let set_str = format!("{:?}", set); + let set_str = format!("{set:?}"); assert!(set_str == "{1, 2}" || set_str == "{2, 1}"); - assert_eq!(format!("{:?}", empty), "{}"); + assert_eq!(format!("{empty:?}"), "{}"); } #[test] @@ -2691,11 +2794,12 @@ mod test_set { } #[test] + #[allow(clippy::needless_borrow)] fn test_extend_ref() { let mut a = HashSet::new(); a.insert(1); - a.extend(&[2, 3, 4]); + a.extend([2, 3, 4]); assert_eq!(a.len(), 4); assert!(a.contains(&1)); @@ -2730,10 +2834,10 @@ mod test_set { } #[test] - fn test_drain_filter() { + fn test_extract_if() { { let mut set: HashSet = (0..8).collect(); - let drained = set.drain_filter(|&k| k % 2 == 0); + let drained = set.extract_if(|&k| k % 2 == 0); let mut out = drained.collect::>(); out.sort_unstable(); assert_eq!(vec![0, 2, 4, 6], out); @@ -2741,7 +2845,7 @@ mod test_set { } { let mut set: HashSet = (0..8).collect(); - drop(set.drain_filter(|&k| k % 2 == 0)); + set.extract_if(|&k| k % 2 == 0).for_each(drop); assert_eq!(set.len(), 4, "Removes non-matching items on drop"); } } @@ -2787,4 +2891,11 @@ mod test_set { set.insert(i); } } + + #[test] + fn collect() { + // At the time of writing, this hits the ZST case in from_base_index + // (and without the `map`, it does not). 
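+        // `()` is a zero-sized type, so the table stores no element data for
+        // it; `(0..3)` alone would collect non-zero-sized `i32`s instead.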
+ let mut _set: HashSet<_> = (0..3).map(|_| ()).collect(); + } } diff --git a/vendor/hashbrown/src/table.rs b/vendor/hashbrown/src/table.rs new file mode 100644 index 0000000..bfb5dd9 --- /dev/null +++ b/vendor/hashbrown/src/table.rs @@ -0,0 +1,2030 @@ +use core::{fmt, iter::FusedIterator, marker::PhantomData}; + +use crate::{ + raw::{ + Allocator, Bucket, Global, InsertSlot, RawDrain, RawExtractIf, RawIntoIter, RawIter, + RawTable, + }, + TryReserveError, +}; + +/// Low-level hash table with explicit hashing. +/// +/// The primary use case for this type over [`HashMap`] or [`HashSet`] is to +/// support types that do not implement the [`Hash`] and [`Eq`] traits, but +/// instead require additional data not contained in the key itself to compute a +/// hash and compare two elements for equality. +/// +/// Examples of when this can be useful include: +/// - An `IndexMap` implementation where indices into a `Vec` are stored as +/// elements in a `HashTable`. Hashing and comparing the elements +/// requires indexing the associated `Vec` to get the actual value referred to +/// by the index. +/// - Avoiding re-computing a hash when it is already known. +/// - Mutating the key of an element in a way that doesn't affect its hash. +/// +/// To achieve this, `HashTable` methods that search for an element in the table +/// require a hash value and equality function to be explicitly passed in as +/// arguments. The method will then iterate over the elements with the given +/// hash and call the equality function on each of them, until a match is found. +/// +/// In most cases, a `HashTable` will not be exposed directly in an API. It will +/// instead be wrapped in a helper type which handles the work of calculating +/// hash values and comparing elements. +/// +/// Due to its low-level nature, this type provides fewer guarantees than +/// [`HashMap`] and [`HashSet`]. Specifically, the API allows you to shoot +/// yourself in the foot by having multiple elements with identical keys in the +/// table. The table itself will still function correctly and lookups will +/// arbitrarily return one of the matching elements. However you should avoid +/// doing this because it changes the runtime of hash table operations from +/// `O(1)` to `O(k)` where `k` is the number of duplicate entries. +/// +/// [`HashMap`]: super::HashMap +/// [`HashSet`]: super::HashSet +pub struct HashTable +where + A: Allocator, +{ + pub(crate) raw: RawTable, +} + +impl HashTable { + /// Creates an empty `HashTable`. + /// + /// The hash table is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let mut table: HashTable<&str> = HashTable::new(); + /// assert_eq!(table.len(), 0); + /// assert_eq!(table.capacity(), 0); + /// ``` + pub const fn new() -> Self { + Self { + raw: RawTable::new(), + } + } + + /// Creates an empty `HashTable` with the specified capacity. + /// + /// The hash table will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash table will not allocate. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let mut table: HashTable<&str> = HashTable::with_capacity(10); + /// assert_eq!(table.len(), 0); + /// assert!(table.capacity() >= 10); + /// ``` + pub fn with_capacity(capacity: usize) -> Self { + Self { + raw: RawTable::with_capacity(capacity), + } + } +} + +impl HashTable +where + A: Allocator, +{ + /// Creates an empty `HashTable` using the given allocator. + /// + /// The hash table is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use bumpalo::Bump; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let bump = Bump::new(); + /// let mut table = HashTable::new_in(&bump); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // The created HashTable holds none elements + /// assert_eq!(table.len(), 0); + /// + /// // The created HashTable also doesn't allocate memory + /// assert_eq!(table.capacity(), 0); + /// + /// // Now we insert element inside created HashTable + /// table.insert_unique(hasher(&"One"), "One", hasher); + /// // We can see that the HashTable holds 1 element + /// assert_eq!(table.len(), 1); + /// // And it also allocates some capacity + /// assert!(table.capacity() > 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub const fn new_in(alloc: A) -> Self { + Self { + raw: RawTable::new_in(alloc), + } + } + + /// Creates an empty `HashTable` with the specified capacity using the given allocator. + /// + /// The hash table will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash table will not allocate. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use bumpalo::Bump; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let bump = Bump::new(); + /// let mut table = HashTable::with_capacity_in(5, &bump); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // The created HashTable holds none elements + /// assert_eq!(table.len(), 0); + /// // But it can hold at least 5 elements without reallocating + /// let empty_map_capacity = table.capacity(); + /// assert!(empty_map_capacity >= 5); + /// + /// // Now we insert some 5 elements inside created HashTable + /// table.insert_unique(hasher(&"One"), "One", hasher); + /// table.insert_unique(hasher(&"Two"), "Two", hasher); + /// table.insert_unique(hasher(&"Three"), "Three", hasher); + /// table.insert_unique(hasher(&"Four"), "Four", hasher); + /// table.insert_unique(hasher(&"Five"), "Five", hasher); + /// + /// // We can see that the HashTable holds 5 elements + /// assert_eq!(table.len(), 5); + /// // But its capacity isn't changed + /// assert_eq!(table.capacity(), empty_map_capacity) + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self { + raw: RawTable::with_capacity_in(capacity, alloc), + } + } + + /// Returns a reference to the underlying allocator. 
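+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// use hashbrown::HashTable;
+    ///
+    /// let table: HashTable<i32> = HashTable::new();
+    /// // With no explicit allocator, this returns a reference to `Global`.
+    /// let _alloc = table.allocator();
+    /// ```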
+ pub fn allocator(&self) -> &A { + self.raw.allocator() + } + + /// Returns a reference to an entry in the table with the given hash and + /// which satisfies the equality function passed. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// table.insert_unique(hasher(&3), 3, hasher); + /// assert_eq!(table.find(hasher(&2), |&val| val == 2), Some(&2)); + /// assert_eq!(table.find(hasher(&4), |&val| val == 4), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + self.raw.get(hash, eq) + } + + /// Returns a mutable reference to an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// When mutating an entry, you should ensure that it still retains the same + /// hash value as when it was inserted, otherwise lookups of that entry may + /// fail to find it. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Some(val) = table.find_mut(hasher(&1), |val| val.0 == 1) { + /// val.1 = "b"; + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), Some(&(1, "b"))); + /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + self.raw.get_mut(hash, eq) + } + + /// Returns an `OccupiedEntry` for an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This can be used to remove the entry from the table. Call + /// [`HashTable::entry`] instead if you wish to insert an entry if the + /// lookup fails. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Ok(entry) = table.find_entry(hasher(&1), |val| val.0 == 1) { + /// entry.remove(); + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find_entry( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + ) -> Result, AbsentEntry<'_, T, A>> { + match self.raw.find(hash, eq) { + Some(bucket) => Ok(OccupiedEntry { + hash, + bucket, + table: self, + }), + None => Err(AbsentEntry { table: self }), + } + } + + /// Returns an `Entry` for an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This can be used to remove the entry from the table, or insert a new + /// entry with the given hash if one doesn't already exist. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// This method may grow the table in preparation for an insertion. Call + /// [`HashTable::find_entry`] if this is undesirable. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::Entry; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Entry::Occupied(entry) = table.entry(hasher(&1), |val| val.0 == 1, |val| hasher(&val.0)) + /// { + /// entry.remove(); + /// } + /// if let Entry::Vacant(entry) = table.entry(hasher(&2), |val| val.0 == 2, |val| hasher(&val.0)) { + /// entry.insert((2, "b")); + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); + /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), Some(&(2, "b"))); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn entry( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + hasher: impl Fn(&T) -> u64, + ) -> Entry<'_, T, A> { + match self.raw.find_or_find_insert_slot(hash, eq, hasher) { + Ok(bucket) => Entry::Occupied(OccupiedEntry { + hash, + bucket, + table: self, + }), + Err(insert_slot) => Entry::Vacant(VacantEntry { + hash, + insert_slot, + table: self, + }), + } + } + + /// Inserts an element into the `HashTable` with the given hash value, but + /// without checking whether an equivalent element already exists within the + /// table. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut v = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// v.insert_unique(hasher(&1), 1, hasher); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn insert_unique( + &mut self, + hash: u64, + value: T, + hasher: impl Fn(&T) -> u64, + ) -> OccupiedEntry<'_, T, A> { + let bucket = self.raw.insert(hash, value, hasher); + OccupiedEntry { + hash, + bucket, + table: self, + } + } + + /// Clears the table, removing all values. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut v = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// v.insert_unique(hasher(&1), 1, hasher); + /// v.clear(); + /// assert!(v.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn clear(&mut self) { + self.raw.clear(); + } + + /// Shrinks the capacity of the table as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::with_capacity(100); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// assert!(table.capacity() >= 100); + /// table.shrink_to_fit(hasher); + /// assert!(table.capacity() >= 2); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn shrink_to_fit(&mut self, hasher: impl Fn(&T) -> u64) { + self.raw.shrink_to(self.len(), hasher) + } + + /// Shrinks the capacity of the table with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::with_capacity(100); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// assert!(table.capacity() >= 100); + /// table.shrink_to(10, hasher); + /// assert!(table.capacity() >= 10); + /// table.shrink_to(0, hasher); + /// assert!(table.capacity() >= 2); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn shrink_to(&mut self, min_capacity: usize, hasher: impl Fn(&T) -> u64) { + self.raw.shrink_to(min_capacity, hasher); + } + + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashTable`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashTable::try_reserve) instead + /// if you want to handle memory allocation failure. + /// + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable = HashTable::new(); + /// let hasher = BuildHasherDefault::::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.reserve(10, hasher); + /// assert!(table.capacity() >= 10); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + self.raw.reserve(additional, hasher) + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashTable`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. 
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<i32> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table
+ /// .try_reserve(10, hasher)
+ /// .expect("why is the test harness OOMing on 10 bytes?");
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn try_reserve(
+ &mut self,
+ additional: usize,
+ hasher: impl Fn(&T) -> u64,
+ ) -> Result<(), TryReserveError> {
+ self.raw.try_reserve(additional, hasher)
+ }
+
+ /// Returns the number of elements the table can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashTable;
+ /// let table: HashTable<i32> = HashTable::with_capacity(100);
+ /// assert!(table.capacity() >= 100);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.raw.capacity()
+ }
+
+ /// Returns the number of elements in the table.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// let mut v = HashTable::new();
+ /// assert_eq!(v.len(), 0);
+ /// v.insert_unique(hasher(&1), 1, hasher);
+ /// assert_eq!(v.len(), 1);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn len(&self) -> usize {
+ self.raw.len()
+ }
+
+ /// Returns `true` if the table contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// let mut v = HashTable::new();
+ /// assert!(v.is_empty());
+ /// v.insert_unique(hasher(&1), 1, hasher);
+ /// assert!(!v.is_empty());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.raw.is_empty()
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+ /// The iterator element type is `&'a T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"a"), "a", hasher);
+ /// table.insert_unique(hasher(&"b"), "b", hasher);
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in table.iter() {
+ /// println!("{}", x);
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter {
+ inner: unsafe { self.raw.iter() },
+ marker: PhantomData,
+ }
+ }
+
+ /// An iterator visiting all elements in arbitrary order,
+ /// with mutable references to the elements.
+ /// The iterator element type is `&'a mut T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), 1, hasher);
+ /// table.insert_unique(hasher(&2), 2, hasher);
+ /// table.insert_unique(hasher(&3), 3, hasher);
+ ///
+ /// // Update all values
+ /// for val in table.iter_mut() {
+ /// *val *= 2;
+ /// }
+ ///
+ /// assert_eq!(table.len(), 3);
+ /// let mut vec: Vec<i32> = Vec::new();
+ ///
+ /// for val in &table {
+ /// println!("val: {}", val);
+ /// vec.push(*val);
+ /// }
+ ///
+ /// // The `Iter` iterator produces items in arbitrary order, so the
+ /// // items must be sorted to test them against a sorted array.
+ /// vec.sort_unstable();
+ /// assert_eq!(vec, [2, 4, 6]);
+ ///
+ /// assert_eq!(table.len(), 3);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut {
+ inner: unsafe { self.raw.iter() },
+ marker: PhantomData,
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in 1..=6 {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ /// table.retain(|&mut x| x % 2 == 0);
+ /// assert_eq!(table.len(), 3);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn retain(&mut self, mut f: impl FnMut(&mut T) -> bool) {
+ // Here we only use `iter` as a temporary, preventing use-after-free
+ unsafe {
+ for item in self.raw.iter() {
+ if !f(item.as_mut()) {
+ self.raw.erase(item);
+ }
+ }
+ }
+ }
+
+ /// Clears the table, returning all elements in an iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in 1..=3 {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ /// assert!(!table.is_empty());
+ ///
+ /// // print 1, 2, 3 in an arbitrary order
+ /// for i in table.drain() {
+ /// println!("{}", i);
+ /// }
+ ///
+ /// assert!(table.is_empty());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn drain(&mut self) -> Drain<'_, T, A> {
+ Drain {
+ inner: self.raw.drain(),
+ }
+ }
+
+ /// Drains elements which are true under the given predicate,
+ /// and returns an iterator over the removed items.
+ ///
+ /// In other words, move all elements `e` such that `f(&e)` returns `true` out
+ /// into another iterator.
+ ///
+ /// If the returned `ExtractIf` is not exhausted, e.g.
because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain()`] with a negated predicate if you do not need the returned iterator.
+ ///
+ /// [`retain()`]: HashTable::retain
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in 0..8 {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ /// let drained: Vec<i32> = table.extract_if(|&mut v| v % 2 == 0).collect();
+ ///
+ /// let mut evens = drained.into_iter().collect::<Vec<_>>();
+ /// let mut odds = table.into_iter().collect::<Vec<_>>();
+ /// evens.sort();
+ /// odds.sort();
+ ///
+ /// assert_eq!(evens, vec![0, 2, 4, 6]);
+ /// assert_eq!(odds, vec![1, 3, 5, 7]);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ ExtractIf {
+ f,
+ inner: RawExtractIf {
+ iter: unsafe { self.raw.iter() },
+ table: &mut self.raw,
+ },
+ }
+ }
+
+ /// Attempts to get mutable references to `N` values in the table at once.
+ ///
+ /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
+ /// the `i`th key to be looked up.
+ ///
+ /// Returns an array of length `N` with the results of each query. For soundness, at most one
+ /// mutable reference will be returned to any value. `None` will be returned if any of the
+ /// keys are duplicates or missing.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut libraries: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for (k, v) in [
+ /// ("Bodleian Library", 1602),
+ /// ("Athenæum", 1807),
+ /// ("Herzogin-Anna-Amalia-Bibliothek", 1691),
+ /// ("Library of Congress", 1800),
+ /// ] {
+ /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k));
+ /// }
+ ///
+ /// let keys = ["Athenæum", "Library of Congress"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(
+ /// got,
+ /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let keys = ["Athenæum", "New York Public Library"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let keys = ["Athenæum", "Athenæum"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn get_many_mut<const N: usize>(
+ &mut self,
+ hashes: [u64; N],
+ eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[&'_ mut T; N]> {
+ self.raw.get_many_mut(hashes, eq)
+ }
+
+ /// Attempts to get mutable references to `N` values in the table at once, without validating that
+ /// the values are unique.
+ ///
+ /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
+ /// the `i`th key to be looked up.
+ ///
+ /// Returns an array of length `N` with the results of each query. `None` will be returned if
+ /// any of the keys are missing.
+ ///
+ /// For a safe alternative see [`get_many_mut`](`HashTable::get_many_mut`).
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting
+ /// references are not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut libraries: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for (k, v) in [
+ /// ("Bodleian Library", 1602),
+ /// ("Athenæum", 1807),
+ /// ("Herzogin-Anna-Amalia-Bibliothek", 1691),
+ /// ("Library of Congress", 1800),
+ /// ] {
+ /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k));
+ /// }
+ ///
+ /// let keys = ["Athenæum", "Library of Congress"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(
+ /// got,
+ /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let keys = ["Athenæum", "New York Public Library"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let keys = ["Athenæum", "Athenæum"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub unsafe fn get_many_unchecked_mut<const N: usize>(
+ &mut self,
+ hashes: [u64; N],
+ eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[&'_ mut T; N]> {
+ self.raw.get_many_unchecked_mut(hashes, eq)
+ }
+}
+
+impl<T, A> IntoIterator for HashTable<T, A>
+where
+ A: Allocator,
+{
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ fn into_iter(self) -> IntoIter<T, A> {
+ IntoIter {
+ inner: self.raw.into_iter(),
+ }
+ }
+}
+
+impl<'a, T, A> IntoIterator for &'a HashTable<T, A>
+where
+ A: Allocator,
+{
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut HashTable<T, A>
+where
+ A: Allocator,
+{
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+impl<T, A> Default for HashTable<T, A>
+where
+ A: Allocator + Default,
+{
+ fn default() -> Self {
+ Self {
+ raw: Default::default(),
+ }
+ }
+}
+
+impl<T, A> Clone for HashTable<T, A>
+where
+ T: Clone,
+ A: Allocator + Clone,
+{
+ fn clone(&self) -> Self {
+ Self {
+ raw: self.raw.clone(),
+ }
+ }
+}
+
+impl<T, A> fmt::Debug for HashTable<T, A>
+where
+ T: fmt::Debug,
+ A: Allocator,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter()).finish()
+ }
+}
+
+/// A view into a single entry in a table, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashTable`].
+///
+/// [`HashTable`]: struct.HashTable.html
+/// [`entry`]: struct.HashTable.html#method.entry
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in ["a", "b", "c"] {
+/// table.insert_unique(hasher(&x), x, hasher);
+/// }
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing value (insert)
+/// let entry: Entry<_> = table.entry(hasher(&"a"), |&x| x == "a", hasher);
+/// let _raw_o: OccupiedEntry<_, _> = entry.insert("a");
+/// assert_eq!(table.len(), 3);
+/// // Nonexistent value (insert)
+/// table.entry(hasher(&"d"), |&x| x == "d", hasher).insert("d");
+///
+/// // Existing value (or_insert)
+/// table
+/// .entry(hasher(&"b"), |&x| x == "b", hasher)
+/// .or_insert("b");
+/// // Nonexistent value (or_insert)
+/// table
+/// .entry(hasher(&"e"), |&x| x == "e", hasher)
+/// .or_insert("e");
+///
+/// println!("Our HashTable: {:?}", table);
+///
+/// let mut vec: Vec<_> = table.iter().copied().collect();
+/// // The `Iter` iterator produces items in arbitrary order, so the
+/// // items must be sorted to test them against a sorted array.
+/// vec.sort_unstable();
+/// assert_eq!(vec, ["a", "b", "c", "d", "e"]);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub enum Entry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ /// An occupied entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in ["a", "b"] {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ ///
+ /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+ /// Entry::Vacant(_) => unreachable!(),
+ /// Entry::Occupied(_) => {}
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ Occupied(OccupiedEntry<'a, T, A>),
+
+ /// A vacant entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::<&str>::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+ /// Entry::Vacant(_) => {}
+ /// Entry::Occupied(_) => unreachable!(),
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ Vacant(VacantEntry<'a, T, A>),
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Entry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+ Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+ }
+ }
+}
+
+impl<'a, T, A> Entry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Sets the value of the entry, replacing any existing value if there is
+ /// one, and returns an [`OccupiedEntry`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// let entry = table
+ /// .entry(hasher(&"horseyland"), |&x| x == "horseyland", hasher)
+ /// .insert("horseyland");
+ ///
+ /// assert_eq!(entry.get(), &"horseyland");
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> {
+ match self {
+ Entry::Occupied(mut entry) => {
+ *entry.get_mut() = value;
+ entry
+ }
+ Entry::Vacant(entry) => entry.insert(value),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting if it was vacant.
+ ///
+ /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// // nonexistent key
+ /// table
+ /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher)
+ /// .or_insert("poneyland");
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |&x| x == "poneyland")
+ /// .is_some());
+ ///
+ /// // existing key
+ /// table
+ /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher)
+ /// .or_insert("poneyland");
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |&x| x == "poneyland")
+ /// .is_some());
+ /// assert_eq!(table.len(), 1);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn or_insert(self, default: T) -> OccupiedEntry<'a, T, A> {
+ match self {
+ Entry::Occupied(entry) => entry,
+ Entry::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty.
+ ///
+ /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<String> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// table
+ /// .entry(hasher("poneyland"), |x| x == "poneyland", |val| hasher(val))
+ /// .or_insert_with(|| "poneyland".to_string());
+ ///
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |x| x == "poneyland")
+ /// .is_some());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn or_insert_with(self, default: impl FnOnce() -> T) -> OccupiedEntry<'a, T, A> {
+ match self {
+ Entry::Occupied(entry) => entry,
+ Entry::Vacant(entry) => entry.insert(default()),
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the table.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// table
+ /// .entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// )
+ /// .and_modify(|(_, v)| *v += 1)
+ /// .or_insert(("poneyland", 42));
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"),
+ /// Some(&("poneyland", 42))
+ /// );
+ ///
+ /// table
+ /// .entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// )
+ /// .and_modify(|(_, v)| *v += 1)
+ /// .or_insert(("poneyland", 42));
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"),
+ /// Some(&("poneyland", 43))
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn and_modify(self, f: impl FnOnce(&mut T)) -> Self {
+ match self {
+ Entry::Occupied(mut entry) => {
+ f(entry.get_mut());
+ Entry::Occupied(entry)
+ }
+ Entry::Vacant(entry) => Entry::Vacant(entry),
+ }
+ }
+}
+
+/// A view into an occupied entry in a `HashTable`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in ["a", "b", "c"] {
+/// table.insert_unique(hasher(&x), x, hasher);
+/// }
+/// assert_eq!(table.len(), 3);
+///
+/// let _entry_o: OccupiedEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap();
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing key
+/// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+/// Entry::Vacant(_) => unreachable!(),
+/// Entry::Occupied(view) => {
+/// assert_eq!(view.get(), &"a");
+/// }
+/// }
+///
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing key (take)
+/// match table.entry(hasher(&"c"), |&x| x == "c", hasher) {
+/// Entry::Vacant(_) => unreachable!(),
+/// Entry::Occupied(view) => {
+/// assert_eq!(view.remove().0, "c");
+/// }
+/// }
+/// assert_eq!(table.find(hasher(&"c"), |&x| x == "c"), None);
+/// assert_eq!(table.len(), 2);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub struct OccupiedEntry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ hash: u64,
+ bucket: Bucket<T>,
+ table: &'a mut HashTable<T, A>,
+}
+
+unsafe impl<T, A> Send for OccupiedEntry<'_, T, A>
+where
+ T: Send,
+ A: Send + Allocator,
+{
+}
+unsafe impl<T, A> Sync for OccupiedEntry<'_, T, A>
+where
+ T: Sync,
+ A: Sync + Allocator,
+{
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for OccupiedEntry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedEntry")
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+impl<'a, T, A> OccupiedEntry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Takes the value out of the entry, and returns it along with a
+ /// `VacantEntry` that can be used to insert another value with the same
+ /// hash as the one that was just removed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// // The table is empty
+ /// assert!(table.is_empty() && table.capacity() == 0);
+ ///
+ /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+ /// let capacity_before_remove = table.capacity();
+ ///
+ /// if let Entry::Occupied(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+ /// assert_eq!(o.remove().0, "poneyland");
+ /// }
+ ///
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |&x| x == "poneyland")
+ /// .is_none());
+ /// // Now the table holds no elements, but its capacity equals the old one
+ /// assert!(table.len() == 0 && table.capacity() == capacity_before_remove);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn remove(self) -> (T, VacantEntry<'a, T, A>) {
+ let (val, slot) = unsafe { self.table.raw.remove(self.bucket) };
+ (
+ val,
+ VacantEntry {
+ hash: self.hash,
+ insert_slot: slot,
+ table: self.table,
+ },
+ )
+ }
+
+ /// Gets a reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+ ///
+ /// match table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+ /// Entry::Vacant(_) => panic!(),
+ /// Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"),
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn get(&self) -> &T {
+ unsafe { self.bucket.as_ref() }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ ///
+ /// If you need a reference to the `OccupiedEntry` which may outlive the
+ /// destruction of the `Entry` value, see [`into_mut`].
+ ///
+ /// [`into_mut`]: #method.into_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k));
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 12))
+ /// );
+ ///
+ /// if let Entry::Occupied(mut o) = table.entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// ) {
+ /// o.get_mut().1 += 10;
+ /// assert_eq!(o.get().1, 22);
+ ///
+ /// // We can use the same Entry multiple times.
+ /// o.get_mut().1 += 2;
+ /// }
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 24))
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe { self.bucket.as_mut() }
+ }
+
+ /// Converts the OccupiedEntry into a mutable reference to the value in the entry
+ /// with a lifetime bound to the table itself.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
+ ///
+ /// [`get_mut`]: #method.get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k));
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 12))
+ /// );
+ ///
+ /// let value: &mut (&str, u32);
+ /// match table.entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// ) {
+ /// Entry::Occupied(entry) => value = entry.into_mut(),
+ /// Entry::Vacant(_) => panic!(),
+ /// }
+ /// value.1 += 10;
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 22))
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn into_mut(self) -> &'a mut T {
+ unsafe { self.bucket.as_mut() }
+ }
+
+ /// Converts the OccupiedEntry into a mutable reference to the underlying
+ /// table.
+ pub fn into_table(self) -> &'a mut HashTable<T, A> {
+ self.table
+ }
+}
+
+/// A view into a vacant entry in a `HashTable`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{Entry, HashTable, VacantEntry};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<&str> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+///
+/// let entry_v: VacantEntry<_, _> = match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+/// Entry::Vacant(view) => view,
+/// Entry::Occupied(_) => unreachable!(),
+/// };
+/// entry_v.insert("a");
+/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1);
+///
+/// // Nonexistent key (insert)
+/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) {
+/// Entry::Vacant(view) => {
+/// view.insert("b");
+/// }
+/// Entry::Occupied(_) => unreachable!(),
+/// }
+/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub struct VacantEntry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ hash: u64,
+ insert_slot: InsertSlot,
+ table: &'a mut HashTable<T, A>,
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for VacantEntry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("VacantEntry")
+ }
+}
+
+impl<'a, T, A> VacantEntry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Inserts a new element into the table with the hash that was used to
+ /// obtain the `VacantEntry`.
+ ///
+ /// An `OccupiedEntry` is returned for the newly inserted element.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// if let Entry::Vacant(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+ /// o.insert("poneyland");
+ /// }
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&x| x == "poneyland"),
+ /// Some(&"poneyland")
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> {
+ let bucket = unsafe {
+ self.table
+ .raw
+ .insert_in_slot(self.hash, self.insert_slot, value)
+ };
+ OccupiedEntry {
+ hash: self.hash,
+ bucket,
+ table: self.table,
+ }
+ }
+
+ /// Converts the VacantEntry into a mutable reference to the underlying
+ /// table.
+ pub fn into_table(self) -> &'a mut HashTable<T, A> {
+ self.table
+ }
+}
+
+/// Type representing the absence of an entry, as returned by [`HashTable::find_entry`].
+///
+/// This type only exists due to [limitations] in Rust's NLL borrow checker. In
+/// the future, `find_entry` will return an `Option<OccupiedEntry>` and this
+/// type will be removed.
+///
+/// [limitations]: https://smallcultfollowing.com/babysteps/blog/2018/06/15/mir-based-borrow-check-nll-status-update/#polonius
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{AbsentEntry, Entry, HashTable};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<&str> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+///
+/// let entry_v: AbsentEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap_err();
+/// entry_v
+/// .into_table()
+/// .insert_unique(hasher(&"a"), "a", hasher);
+/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1);
+///
+/// // Nonexistent key (insert)
+/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) {
+/// Entry::Vacant(view) => {
+/// view.insert("b");
+/// }
+/// Entry::Occupied(_) => unreachable!(),
+/// }
+/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub struct AbsentEntry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ table: &'a mut HashTable<T, A>,
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for AbsentEntry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("AbsentEntry")
+ }
+}
+
+impl<'a, T, A> AbsentEntry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Converts the AbsentEntry into a mutable reference to the underlying
+ /// table.
+ pub fn into_table(self) -> &'a mut HashTable<T, A> {
+ self.table
+ }
+}
+
+/// An iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `&'a T`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.HashTable.html#method.iter
+/// [`HashTable`]: struct.HashTable.html
+pub struct Iter<'a, T> {
+ inner: RawIter<T>,
+ marker: PhantomData<&'a T>,
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next().map(|bucket| unsafe { bucket.as_ref() })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+/// A mutable iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `&'a mut T`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.HashTable.html#method.iter_mut
+/// [`HashTable`]: struct.HashTable.html
+pub struct IterMut<'a, T> {
+ inner: RawIter<T>,
+ marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next().map(|bucket| unsafe { bucket.as_mut() })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+/// An owning iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `T`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashTable`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+/// The table cannot be used after calling that method.
+///
+/// [`into_iter`]: struct.HashTable.html#method.into_iter
+/// [`HashTable`]: struct.HashTable.html
+/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html
+pub struct IntoIter<T, A = Global>
+where
+ A: Allocator,
+{
+ inner: RawIntoIter<T, A>,
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+ A: Allocator,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.inner.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<T, A> ExactSizeIterator for IntoIter<T, A>
+where
+ A: Allocator,
+{
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<T, A> FusedIterator for IntoIter<T, A> where A: Allocator {}
+
+/// A draining iterator over the items of a `HashTable`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashTable`].
+/// See its documentation for more.
+///
+/// [`HashTable`]: struct.HashTable.html
+/// [`drain`]: struct.HashTable.html#method.drain
+pub struct Drain<'a, T, A: Allocator = Global> {
+ inner: RawDrain<'a, T, A>,
+}
+
+impl<T, A: Allocator> Drain<'_, T, A> {
+ /// Returns an iterator of references over the remaining items.
+ fn iter(&self) -> Iter<'_, T> {
+ Iter {
+ inner: self.inner.iter(),
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+/// A draining iterator over entries of a `HashTable` which don't satisfy the predicate `f`.
+///
+/// This `struct` is created by [`HashTable::extract_if`].
See its
+/// documentation for more.
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, T, F, A: Allocator = Global>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ f: F,
+ inner: RawExtractIf<'a, T, A>,
+}
+
+impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next(|val| (self.f)(val))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.inner.iter.size_hint().1)
+ }
+}
+
+impl<T, F, A: Allocator> FusedIterator for ExtractIf<'_, T, F, A> where F: FnMut(&mut T) -> bool {}
diff --git a/vendor/hashbrown/tests/equivalent_trait.rs b/vendor/hashbrown/tests/equivalent_trait.rs
new file mode 100644
index 0000000..713dddd
--- /dev/null
+++ b/vendor/hashbrown/tests/equivalent_trait.rs
@@ -0,0 +1,53 @@
+use hashbrown::Equivalent;
+use hashbrown::HashMap;
+
+use std::hash::Hash;
+
+#[derive(Debug, Hash)]
+pub struct Pair<A, B>(pub A, pub B);
+
+impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
+where
+ C: PartialEq<A>,
+ D: PartialEq<B>,
+{
+ fn eq(&self, rhs: &(A, B)) -> bool {
+ self.0 == rhs.0 && self.1 == rhs.1
+ }
+}
+
+impl<A, B, X> Equivalent<X> for Pair<A, B>
+where
+ Pair<A, B>: PartialEq<X>,
+ A: Hash + Eq,
+ B: Hash + Eq,
+{
+ fn equivalent(&self, other: &X) -> bool {
+ *self == *other
+ }
+}
+
+#[test]
+fn test_lookup() {
+ let s = String::from;
+ let mut map = HashMap::new();
+ map.insert((s("a"), s("b")), 1);
+ map.insert((s("a"), s("x")), 2);
+
+ assert!(map.contains_key(&Pair("a", "b")));
+ assert!(!map.contains_key(&Pair("b", "a")));
+}
+
+#[test]
+fn test_string_str() {
+ let s = String::from;
+ let mut map = HashMap::new();
+ map.insert(s("a"), 1);
+ map.insert(s("b"), 2);
+ map.insert(s("x"), 3);
+ map.insert(s("y"), 4);
+
+ assert!(map.contains_key("a"));
+ assert!(!map.contains_key("z"));
+ assert_eq!(map.remove("b"), Some(2));
+}
diff --git a/vendor/hashbrown/tests/raw.rs b/vendor/hashbrown/tests/raw.rs
new file mode 100644
index 0000000..858836e
--- /dev/null
+++ b/vendor/hashbrown/tests/raw.rs
@@ -0,0 +1,11 @@
+#![cfg(feature = "raw")]
+
+use hashbrown::raw::RawTable;
+use std::mem;
+
+#[test]
+fn test_allocation_info() {
+ assert_eq!(RawTable::<()>::new().allocation_info().1.size(), 0);
+ assert_eq!(RawTable::<u32>::new().allocation_info().1.size(), 0);
+ assert!(RawTable::<u32>::with_capacity(1).allocation_info().1.size() > mem::size_of::<u32>());
+}
diff --git a/vendor/hashbrown/tests/rayon.rs b/vendor/hashbrown/tests/rayon.rs
index 8c603c5..d55e5a9 100644
--- a/vendor/hashbrown/tests/rayon.rs
+++ b/vendor/hashbrown/tests/rayon.rs
@@ -356,7 +356,9 @@ fn set_seq_par_equivalence_into_iter_empty() {
 let vec_seq = SET_EMPTY.clone().into_iter().collect::<Vec<_>>();
 let vec_par = SET_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
 
- assert_eq3!(vec_seq, vec_par, []);
+ // Work around type inference failure introduced by rend dev-dependency.
+ let empty: [char; 0] = []; + assert_eq3!(vec_seq, vec_par, empty); } #[test] diff --git a/vendor/hashbrown/tests/set.rs b/vendor/hashbrown/tests/set.rs index 5ae1ec9..86ec964 100644 --- a/vendor/hashbrown/tests/set.rs +++ b/vendor/hashbrown/tests/set.rs @@ -27,7 +27,7 @@ fn test_hashset_insert_remove() { assert_eq!(m.insert(x.clone()), true); } for (i, x) in tx.iter().enumerate() { - println!("removing {} {:?}", i, x); + println!("removing {i} {x:?}"); assert_eq!(m.remove(x), true); } } -- Gitee From 268b694aa73fdc8e7eb5813a05428b8e11d97a50 Mon Sep 17 00:00:00 2001 From: yangpan Date: Fri, 5 Jan 2024 10:24:11 +0800 Subject: [PATCH 3/6] =?UTF-8?q?=E4=BF=AE=E6=94=B9vendor?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/sysboostd/Cargo.toml | 2 +- vendor/rust-ini/.cargo-checksum.json | 2 +- vendor/rust-ini/Cargo.toml | 5 +- vendor/rust-ini/README.md | 99 ++++++++++++++ vendor/rust-ini/rustfmt.toml | 12 +- vendor/rust-ini/src/lib.rs | 188 ++++++++++++++++----------- 6 files changed, 226 insertions(+), 82 deletions(-) create mode 100644 vendor/rust-ini/README.md diff --git a/src/sysboostd/Cargo.toml b/src/sysboostd/Cargo.toml index 0bbb790..d763b90 100644 --- a/src/sysboostd/Cargo.toml +++ b/src/sysboostd/Cargo.toml @@ -22,7 +22,7 @@ toml = "0.5.9" inotify = "0.9" log = "0.4" goblin = "0.7" -rust-ini = "0.19" +rust-ini = "0.20.0" [dev-dependencies.tempfile] version = "3.2.0" diff --git a/vendor/rust-ini/.cargo-checksum.json b/vendor/rust-ini/.cargo-checksum.json index bc59861..a1d68c0 100644 --- a/vendor/rust-ini/.cargo-checksum.json +++ b/vendor/rust-ini/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"Cargo.lock":"6ede11c40f7c73e9bb7d564e9142acd52eada066a3002c8de13457d073dc8477","Cargo.toml":"823914fe37078a356a0388e64f23050453d12db1ca66061c99b294db47dac9f2","LICENSE":"ccf6244964385d34fef3799aa7792e9f8d35517de026f39a4f43f0e89b2079eb","README.rst":"cfe3415a25a215bd6d5f63fab904725d7a1b9ff89b358fe3fad33f2598bf5ba6","examples/test.rs":"a41ab5b4979252853e86dfe429f5fef9b79e1701987f2991ce0d2ffef9e7b210","rustfmt.toml":"15e435a3b302e6a65da0646a4dbfe949f33d5d6cc96be1fae7dcb56cfb90dabf","src/lib.rs":"453442a47e46700cbccb64559912bca141e9e1872169e3461e21d77f6bd4b291"},"package":"7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091"} \ No newline at end of file +{"files":{"Cargo.lock":"b729c317520e7406466f96902f101d95ad9e54fe269d4dca0286281df28f14bf","Cargo.toml":"28cf4eea78ba1980b30fa08839418ca598f47faf8bc81c2f3cc3f4fc5c6ae3b0","LICENSE":"ccf6244964385d34fef3799aa7792e9f8d35517de026f39a4f43f0e89b2079eb","README.md":"5b880cb00c7a233ac93feb521e3b3ecb6dac8da6821d37a5c2d8cab9bc7eb106","examples/test.rs":"a41ab5b4979252853e86dfe429f5fef9b79e1701987f2991ce0d2ffef9e7b210","rustfmt.toml":"b65e4b657ca89f75cf8a2000b682cc7ff41877bd1d14828336283227bac5a637","src/lib.rs":"e43186acbcbbc13be012ef7ea6df6dc128417420e0af88830d3eaf781ea9e11b"},"package":"3e0698206bcb8882bf2a9ecb4c1e7785db57ff052297085a6efd4fe42302068a"} \ No newline at end of file diff --git a/vendor/rust-ini/Cargo.toml b/vendor/rust-ini/Cargo.toml index 8ed21cb..927a046 100644 --- a/vendor/rust-ini/Cargo.toml +++ b/vendor/rust-ini/Cargo.toml @@ -12,10 +12,11 @@ [package] edition = "2018" name = "rust-ini" -version = "0.19.0" +version = "0.20.0" authors = ["Y. T. 
Chung "] description = "An Ini configuration file parsing library in Rust" documentation = "https://docs.rs/rust-ini/" +readme = "README.md" keywords = [ "ini", "configuration", @@ -33,7 +34,7 @@ test = true version = "1.0" [dependencies.ordered-multimap] -version = "0.6" +version = "0.7" [dependencies.unicase] version = "2.6" diff --git a/vendor/rust-ini/README.md b/vendor/rust-ini/README.md new file mode 100644 index 0000000..71093b7 --- /dev/null +++ b/vendor/rust-ini/README.md @@ -0,0 +1,99 @@ +# INI in Rust + +[![Build & Test](https://github.com/zonyitoo/rust-ini/actions/workflows/build-and-test.yml/badge.svg)](https://github.com/zonyitoo/rust-ini/actions/workflows/build-and-test.yml) +[![crates.io](https://img.shields.io/crates/v/rust-ini.svg)](https://crates.io/crates/rust-ini) +[![doc.rs](https://docs.rs/rust-ini/badge.svg)](https://docs.rs/rust-ini) + +[INI](http://en.wikipedia.org/wiki/INI_file) is an informal standard for configuration files for some platforms or software. INI files are simple text files with a basic structure composed of "sections" and "properties". + +This is an INI file parser in [Rust](http://www.rust-lang.org/). + +```toml +[dependencies] +rust-ini = "0.19" +``` + +## Usage + +* Create a Ini configuration file. + +```rust +extern crate ini; +use ini::Ini; + +fn main() { + let mut conf = Ini::new(); + conf.with_section(None::) + .set("encoding", "utf-8"); + conf.with_section(Some("User")) + .set("given_name", "Tommy") + .set("family_name", "Green") + .set("unicode", "Raspberry树莓"); + conf.with_section(Some("Book")) + .set("name", "Rust cool"); + conf.write_to_file("conf.ini").unwrap(); +} +``` + +Then you will get `conf.ini` + +```ini +encoding=utf-8 + +[User] +given_name=Tommy +family_name=Green +unicode=Raspberry\x6811\x8393 + +[Book] +name=Rust cool +``` + +* Read from file `conf.ini` + +```rust +use ini::Ini; + +fn main() { + let conf = Ini::load_from_file("conf.ini").unwrap(); + + let section = conf.section(Some("User")).unwrap(); + let tommy = section.get("given_name").unwrap(); + let green = section.get("family_name").unwrap(); + + println!("{:?} {:?}", tommy, green); + + // iterating + for (sec, prop) in &conf { + println!("Section: {:?}", sec); + for (key, value) in prop.iter() { + println!("{:?}:{:?}", key, value); + } + } +} +``` + +* More details could be found in `examples`. + +## License + +[The MIT License (MIT)](https://opensource.org/licenses/MIT) + +Copyright (c) 2014 Y. T. CHUNG + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/rust-ini/rustfmt.toml b/vendor/rust-ini/rustfmt.toml
index 762f11c..4ee82b9 100644
--- a/vendor/rust-ini/rustfmt.toml
+++ b/vendor/rust-ini/rustfmt.toml
@@ -1,12 +1,18 @@
+edition = "2021"
 max_width = 120
-indent_style = "Visual"
+#indent_style = "Visual"
 #fn_call_width = 120
 reorder_imports = true
-reorder_imports_in_group = true
-reorder_imported_names = true
+reorder_modules = true
+#reorder_imports_in_group = true
+#reorder_imported_names = true
 condense_wildcard_suffixes = true
 #fn_args_layout = "Visual"
 #fn_call_style = "Visual"
 #chain_indent = "Visual"
 normalize_comments = true
 use_try_shorthand = true
+reorder_impl_items = true
+#use_small_heuristics = "Max"
+imports_layout = "HorizontalVertical"
+imports_granularity = "Crate"
diff --git a/vendor/rust-ini/src/lib.rs b/vendor/rust-ini/src/lib.rs
index ab745d6..67a477f 100644
--- a/vendor/rust-ini/src/lib.rs
+++ b/vendor/rust-ini/src/lib.rs
@@ -42,19 +42,22 @@
 //! }
 //! ```
 
-use std::char;
-use std::error;
-use std::fmt::{self, Display};
-use std::fs::{File, OpenOptions};
-use std::io::{self, Read, Write};
-use std::io::{Seek, SeekFrom};
-use std::ops::{Index, IndexMut};
-use std::path::Path;
-use std::str::Chars;
+use std::{
+ char,
+ error,
+ fmt::{self, Display},
+ fs::{File, OpenOptions},
+ io::{self, Read, Seek, SeekFrom, Write},
+ ops::{Index, IndexMut},
+ path::Path,
+ str::Chars,
+};
 
 use cfg_if::cfg_if;
-use ordered_multimap::list_ordered_multimap::{Entry, Iter, IterMut, OccupiedEntry, VacantEntry};
-use ordered_multimap::ListOrderedMultimap;
+use ordered_multimap::{
+ list_ordered_multimap::{Entry, IntoIter, Iter, IterMut, OccupiedEntry, VacantEntry},
+ ListOrderedMultimap,
+};
 
 #[cfg(feature = "case-insensitive")]
 use unicase::UniCase;
@@ -88,40 +91,35 @@ pub enum EscapePolicy {
 
 impl EscapePolicy {
     fn escape_basics(self) -> bool {
-        match self {
-            EscapePolicy::Nothing => false,
-            _ => true,
-        }
+        self != EscapePolicy::Nothing
     }
 
     fn escape_reserved(self) -> bool {
-        match self {
-            EscapePolicy::Reserved => true,
-            EscapePolicy::ReservedUnicode => true,
-            EscapePolicy::ReservedUnicodeExtended => true,
-            EscapePolicy::Everything => true,
-            _ => false,
-        }
+        matches!(
+            self,
+            EscapePolicy::Reserved
+                | EscapePolicy::ReservedUnicode
+                | EscapePolicy::ReservedUnicodeExtended
+                | EscapePolicy::Everything
+        )
     }
 
     fn escape_unicode(self) -> bool {
-        match self {
-            EscapePolicy::BasicsUnicode => true,
-            EscapePolicy::BasicsUnicodeExtended => true,
-            EscapePolicy::ReservedUnicode => true,
-            EscapePolicy::ReservedUnicodeExtended => true,
-            EscapePolicy::Everything => true,
-            _ => false,
-        }
+        matches!(
+            self,
+            EscapePolicy::BasicsUnicode
+                | EscapePolicy::BasicsUnicodeExtended
+                | EscapePolicy::ReservedUnicode
+                | EscapePolicy::ReservedUnicodeExtended
+                | EscapePolicy::Everything
+        )
    }
 
     fn escape_unicode_extended(self) -> bool {
-        match self {
-            EscapePolicy::BasicsUnicodeExtended => true,
-            EscapePolicy::ReservedUnicodeExtended => true,
-            EscapePolicy::Everything => true,
-            _ => false,
-        }
+        matches!(
+            self,
+            EscapePolicy::BasicsUnicodeExtended | EscapePolicy::ReservedUnicodeExtended | EscapePolicy::Everything
+        )
     }
 
     /// Given a character this returns true if it should be escaped as
@@ -450,7 +448,7 @@ impl Properties {
     }
 
     /// Remove the property with all values with the same key
-    pub fn remove_all<'a, S: AsRef<str>>(&'a mut self, s: S) -> impl DoubleEndedIterator<Item = String> + 'a {
+    pub fn remove_all<S: AsRef<str>>(&mut self, s: S) -> impl DoubleEndedIterator<Item = String> + '_ {
         self.data.remove_all(property_get_key!(s.as_ref()))
     }
 
@@ -811,37 +809,28 @@
impl Ini {
 pub fn write_to_opt<W: Write>(&self, writer: &mut W, opt: WriteOption) -> io::Result<()> {
 let mut firstline = true;
 
- if let Some(props) = self.sections.get(&None) {
- for (k, v) in props.iter() {
- let k_str = escape_str(&k[..], opt.escape_policy);
- let v_str = escape_str(&v[..], opt.escape_policy);
- write!(writer, "{}={}{}", k_str, v_str, opt.line_separator)?;
-
- firstline = false;
- }
- }
-
 for (section, props) in &self.sections {
- if let Some(ref section) = *section {
+ if !props.data.is_empty() {
 if firstline {
 firstline = false;
 } else {
 // Write an empty line between sections
 writer.write_all(opt.line_separator.as_str().as_bytes())?;
 }
+ }
 
+ if let Some(ref section) = *section {
 write!(
 writer,
 "[{}]{}",
 escape_str(&section[..], opt.escape_policy),
 opt.line_separator
 )?;
-
- for (k, v) in props.iter() {
- let k_str = escape_str(&k[..], opt.escape_policy);
- let v_str = escape_str(&v[..], opt.escape_policy);
- write!(writer, "{}{}{}{}", k_str, opt.kv_separator, v_str, opt.line_separator)?;
- }
+ }
+
+ for (k, v) in props.iter() {
+ let k_str = escape_str(k, opt.escape_policy);
+ let v_str = escape_str(v, opt.escape_policy);
+ write!(writer, "{}{}{}{}", k_str, opt.kv_separator, v_str, opt.line_separator)?;
 }
 }
 
 Ok(())
@@ -928,10 +917,8 @@ impl Ini {
 // Check if file starts with a BOM marker
 // UTF-8: EF BB BF
 let mut bom = [0u8; 3];
- if let Ok(..) = reader.read_exact(&mut bom) {
- if &bom == b"\xEF\xBB\xBF" {
- with_bom = true;
- }
+ if reader.read_exact(&mut bom).is_ok() && &bom == b"\xEF\xBB\xBF" {
+ with_bom = true;
 }
 
 if !with_bom {
@@ -989,6 +976,29 @@ impl DoubleEndedIterator for SectionIterMut<'_> {
 }
 }
 
+/// Iterator for traversing sections
+pub struct SectionIntoIter {
+ inner: IntoIter<SectionKey, Properties>,
+}
+
+impl Iterator for SectionIntoIter {
+ type Item = (SectionKey, Properties);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl DoubleEndedIterator for SectionIntoIter {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.inner.next_back()
+ }
+}
+
 impl<'a> Ini {
 /// Immutable iterate though sections
 pub fn iter(&'a self) -> SectionIter<'a> {
@@ -1012,8 +1022,8 @@ impl<'a> Ini {
 }
 
 impl<'a> IntoIterator for &'a Ini {
- type Item = (Option<&'a str>, &'a Properties);
 type IntoIter = SectionIter<'a>;
+ type Item = (Option<&'a str>, &'a Properties);
 
 fn into_iter(self) -> Self::IntoIter {
 self.iter()
@@ -1021,14 +1031,25 @@ impl<'a> IntoIterator for &'a Ini {
 }
 
 impl<'a> IntoIterator for &'a mut Ini {
- type Item = (Option<&'a str>, &'a mut Properties);
 type IntoIter = SectionIterMut<'a>;
+ type Item = (Option<&'a str>, &'a mut Properties);
 
 fn into_iter(self) -> Self::IntoIter {
 self.iter_mut()
 }
 }
 
+impl IntoIterator for Ini {
+ type IntoIter = SectionIntoIter;
+ type Item = (SectionKey, Properties);
+
+ fn into_iter(self) -> Self::IntoIter {
+ SectionIntoIter {
+ inner: self.sections.into_iter(),
+ }
+ }
+}
+
 // Ini parser
 struct Parser<'a> {
 ch: Option<char>,
@@ -1182,7 +1203,7 @@ impl<'a> Parser<'a> {
 Err(e) => return Err(e),
 },
 '=' | ':' => {
- if (&curkey[..]).is_empty() {
+ if (curkey[..]).is_empty() {
 return self.error("missing key");
 }
 match self.parse_val() {
@@ -1571,7 +1592,7 @@ mod test {
 ..Default::default()
 },
 );
- assert!(!ini.is_ok());
+ assert!(ini.is_err());
 
 let err = ini.unwrap_err();
 assert_eq!(err.line, 2);
@@ -2126,6 +2147,9 @@ a3 = n3
 use std::str;
 
 let mut ini = Ini::new();
+ ini.with_section(None::<String>)
+ .set("Key1", "Value")
+ .set("Key2", "Value");
 ini.with_section(Some("Section1"))
 .set("Key1",
"Value") .set("Key2", "Value"); @@ -2146,12 +2170,12 @@ a3 = n3 // Test different line endings in Windows and Unix if cfg!(windows) { assert_eq!( - "[Section1]\r\nKey1 = Value\r\nKey2 = Value\r\n\r\n[Section2]\r\nKey1 = Value\r\nKey2 = Value\r\n", + "Key1 = Value\r\nKey2 = Value\r\n\r\n[Section1]\r\nKey1 = Value\r\nKey2 = Value\r\n\r\n[Section2]\r\nKey1 = Value\r\nKey2 = Value\r\n", str::from_utf8(&buf).unwrap() ); } else { assert_eq!( - "[Section1]\nKey1 = Value\nKey2 = Value\n\n[Section2]\nKey1 = Value\nKey2 = Value\n", + "Key1 = Value\nKey2 = Value\n\n[Section1]\nKey1 = Value\nKey2 = Value\n\n[Section2]\nKey1 = Value\nKey2 = Value\n", str::from_utf8(&buf).unwrap() ); } @@ -2320,7 +2344,7 @@ c = d let policy = EscapePolicy::Nothing; assert_eq!(escape_str(test_str, policy), test_str); } - + #[test] fn escape_str_basics() { let test_backslash = r"\backslashes\"; @@ -2333,14 +2357,21 @@ c = d assert_eq!(escape_str(test_controls, EscapePolicy::Nothing), test_controls); assert_eq!(escape_str(test_whitespace, EscapePolicy::Nothing), test_whitespace); - for policy in vec![ - EscapePolicy::Basics, EscapePolicy::BasicsUnicode, EscapePolicy::BasicsUnicodeExtended, - EscapePolicy::Reserved, EscapePolicy::ReservedUnicode, EscapePolicy::ReservedUnicodeExtended, + for policy in [ + EscapePolicy::Basics, + EscapePolicy::BasicsUnicode, + EscapePolicy::BasicsUnicodeExtended, + EscapePolicy::Reserved, + EscapePolicy::ReservedUnicode, + EscapePolicy::ReservedUnicodeExtended, EscapePolicy::Everything, ] { assert_eq!(escape_str(test_backslash, policy), r"\\backslashes\\"); assert_eq!(escape_str(test_nul, policy), r"string with \0nulls\0 in it"); - assert_eq!(escape_str(test_controls, policy), r"|\a| bell, |\b| backspace, |\x007f| delete, |\x001b| escape"); + assert_eq!( + escape_str(test_controls, policy), + r"|\a| bell, |\b| backspace, |\x007f| delete, |\x001b| escape" + ); assert_eq!(escape_str(test_whitespace, policy), r"\t \r\n"); } } @@ -2353,17 +2384,21 @@ c = d let test_punctuation = "!@$%^&*()-_+/?.>,<[]{}``"; // These policies should *not* escape reserved characters. - for policy in vec![ + for policy in [ EscapePolicy::Nothing, - EscapePolicy::Basics, EscapePolicy::BasicsUnicode, EscapePolicy::BasicsUnicodeExtended, + EscapePolicy::Basics, + EscapePolicy::BasicsUnicode, + EscapePolicy::BasicsUnicodeExtended, ] { assert_eq!(escape_str(test_reserved, policy), ":=;#"); assert_eq!(escape_str(test_punctuation, policy), test_punctuation); } // These should. - for policy in vec![ - EscapePolicy::Reserved, EscapePolicy::ReservedUnicodeExtended, EscapePolicy::ReservedUnicode, + for policy in [ + EscapePolicy::Reserved, + EscapePolicy::ReservedUnicodeExtended, + EscapePolicy::ReservedUnicode, EscapePolicy::Everything, ] { assert_eq!(escape_str(test_reserved, policy), r"\:\=\;\#"); @@ -2389,7 +2424,7 @@ c = d // The "Unicode" policies should escape standard BMP unicode, but should *not* escape emoji or supplementary CJK codepoints. // The Basics/Reserved policies should behave identically in this regard. - for policy in vec![EscapePolicy::BasicsUnicode, EscapePolicy::ReservedUnicode] { + for policy in [EscapePolicy::BasicsUnicode, EscapePolicy::ReservedUnicode] { assert_eq!(escape_str(test_unicode, policy), r"\x00e9\x00a3\x2233\x5b57\x2728"); assert_eq!(escape_str(test_emoji, policy), test_emoji); assert_eq!(escape_str(test_cjk, policy), test_cjk); @@ -2397,7 +2432,10 @@ c = d } // UnicodeExtended policies should escape both BMP and supplementary plane characters. 
- for policy in vec![EscapePolicy::BasicsUnicodeExtended, EscapePolicy::ReservedUnicodeExtended] { + for policy in [ + EscapePolicy::BasicsUnicodeExtended, + EscapePolicy::ReservedUnicodeExtended, + ] { assert_eq!(escape_str(test_unicode, policy), r"\x00e9\x00a3\x2233\x5b57\x2728"); assert_eq!(escape_str(test_emoji, policy), r"\x1f431\x1f609"); assert_eq!(escape_str(test_cjk, policy), r"\x2020c\x20547"); -- Gitee From b9a069bebb1ef30f511fd708913329124d2092bd Mon Sep 17 00:00:00 2001 From: yangpan Date: Fri, 5 Jan 2024 10:33:24 +0800 Subject: [PATCH 4/6] =?UTF-8?q?=E4=BF=AE=E6=94=B9vendor--3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- vendor/getrandom/.cargo-checksum.json | 2 +- vendor/getrandom/CHANGELOG.md | 13 -- vendor/getrandom/Cargo.toml | 14 +-- vendor/getrandom/README.md | 2 +- vendor/getrandom/src/custom.rs | 2 +- vendor/getrandom/src/hurd.rs | 18 --- vendor/getrandom/src/lib.rs | 9 +- vendor/getrandom/src/util_libc.rs | 2 +- vendor/ordered-multimap/.cargo-checksum.json | 2 +- vendor/ordered-multimap/CHANGELOG.md | 12 ++ vendor/ordered-multimap/Cargo.toml | 6 +- vendor/ordered-multimap/src/lib.rs | 2 +- .../src/list_ordered_multimap.rs | 93 +++++++------- vendor/rust-ini/README.rst | 114 ------------------ 14 files changed, 66 insertions(+), 225 deletions(-) delete mode 100644 vendor/getrandom/src/hurd.rs delete mode 100644 vendor/rust-ini/README.rst diff --git a/vendor/getrandom/.cargo-checksum.json b/vendor/getrandom/.cargo-checksum.json index fc46f72..5d386a4 100644 --- a/vendor/getrandom/.cargo-checksum.json +++ b/vendor/getrandom/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"fe1a0dc50ac5c7bdd60591f6d1027072c68dcf142131945f782169c74b9e8188","Cargo.toml":"5506345251dee6e156a3d0072d2b3b6bc6894d8cf91adb85fefe211741e7c7f9","LICENSE-APACHE":"aaff376532ea30a0cd5330b9502ad4a4c8bf769c539c87ffe78819d188a18ebf","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"e5858de17fc28ec7a3a62cbefedd301ace8a85291d0aad5cb416824d1b5abadd","SECURITY.md":"816ea79f8c7937888ab5a972a1efb270c4bada028b448953a195359fe11d526e","benches/buffer.rs":"242f56eaeecd1d0a0f6f9419d1bf312b8d211215355022bd4aef5e5e0f53e2a5","src/3ds.rs":"e41b653723740ed89fa68f495b64125786e8dec002e3556d164c5795db62ea50","src/apple-other.rs":"3ff0abc72786a2ac063cdc5df4d18cc53dc493cd68fcb33734cf40cfdbb8f644","src/bsd_arandom.rs":"cfa0627a6b4d1f37065d415972ab813bf1c9f43979d2ff9440c92a53868123aa","src/custom.rs":"a256bd6e7e9bb560803f23a36bd437859ea8a9d8ec92608930b94b33e7314c64","src/dragonfly.rs":"047008e742a7a8050e61ed9626b9f4146dfaa0675e11d6f3680eb8af498d9a6d","src/emscripten.rs":"e0b3b44b52f54454ec3e0a9e7c5222003369d9d1575cc0652e3e7cbe1b3b6da7","src/error.rs":"ff09a7e02d7aff3e45eca6bbef6c686cc46f3c2371a0897a856e4dec4b942e46","src/error_impls.rs":"9c34832ebb99cd5e31bc5c8ffc5beb5b3fa6f7ff0226aaa1cdf8e10e6d64b324","src/espidf.rs":"915ca14cbf9299de51a3c67f34fdd252461d6545f33a7232dfb7fa247ccc0209","src/fuchsia.rs":"d307b15db9f2d67b43050ae6027779a6eb2b8a69e1e89931b55b767aa2622250","src/hermit.rs":"18fdd7917c73f8b16aa82b18003948d32f9b314da10e16ef9cd2fa077b17af00","src/hurd.rs":"1053908c4eaeae9e44078c9509aa80268caa1d66642b7c6a9a80f5b9f0e63fb0","src/js.rs":"c4cd60bcfe63f8affe947773197e288536ab205a73001059f39fc2e5688e98b6","src/lib.rs":"178b4b1dae3a41721f365ea5a4eda3f5b936b310afa4431935968e96edac3120","src/linux_android.rs":"e5f9e579bbde254fcab8f6b79b893d6b74054e023b21c56a3b2b21d8f4b4d825","src/macos.rs":"8f51e095906e751b68e837bfc63cc02b243e16
98b66353566ccba507c81ddad3","src/openbsd.rs":"f6fd0aa74f704335a7e0532bf5e61a7ca90b0cbc398a9c01a0fd891b6fabca0c","src/rdrand.rs":"846ac7b8380a05a50e0592dca57338beb1634c0efc878d6d1e9421be3469a744","src/solaris_illumos.rs":"7209c8b1172fc4df5ad8a79f165556b403cdd90b9eb5f7f7f9ec97bf06f4d8d7","src/solid.rs":"58919109faf06e6d546f75f785d78d6c055e1f95110d1791d9191d1e404f1e20","src/use_file.rs":"ecfc1011b4a9c962ae9b4b75ca5149a4ee83cb0951a80224ce5417046ce11717","src/util.rs":"580fb7c4e41eb6007def8626e019829c22a63980fa4da68a1adef687c57953a2","src/util_libc.rs":"48c1fe251958c6c57b7c93d83f3648d97034feeee0d5cda0cbe7bc0ee0a73fca","src/vita.rs":"ecfa9d347ad5c480ba8ff80a9de968ae060ffb435f1e95777ee413642e62e50a","src/vxworks.rs":"984726b6dd9638a38ceda83124683419b9d69a9041ad9117a470eaec5b386ce4","src/wasi.rs":"229a58af3f13a629571fb83a0c11ef0ed696ba7a44ee2e811c9f348a19b2fb69","src/windows.rs":"dd3d833979fb6b96c04b84dbf8461d5fc819bde93ad9dc26bd0f6c282656c733","tests/common/mod.rs":"b9a36043d71963ba43a9e2899ba8eea80ff9f3284d243d9b9b9f941afa4f4aa4","tests/custom.rs":"1e944ae523b62dba53fe3daf1b964a2498c8fdd21dfa7afe53781bff2fcf276e","tests/normal.rs":"9e1c4b1e468a09ed0225370dfb6608f8b8135e0fabb09bbc1a718105164aade6","tests/rdrand.rs":"156676b57f1e6bd4d66d85b8a999f1cf7a8fb749a10b8b2b4dbbcf803e8c4cd3"},"package":"fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f"} \ No newline at end of file +{"files":{"CHANGELOG.md":"43be031835bc3574d3458703679d40fc6d94afe7d39d27cd131df46bf7d649d4","Cargo.toml":"415d907455f25d51f476a3af7bfaf6ab4a8cc87567690b29955d1e5d97f8b3a7","LICENSE-APACHE":"aaff376532ea30a0cd5330b9502ad4a4c8bf769c539c87ffe78819d188a18ebf","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"e6b00741964e2d1e3e3ca8cbf4688c88b882f6bcef8a9d6b53ec59f5ea6ccefb","SECURITY.md":"816ea79f8c7937888ab5a972a1efb270c4bada028b448953a195359fe11d526e","benches/buffer.rs":"242f56eaeecd1d0a0f6f9419d1bf312b8d211215355022bd4aef5e5e0f53e2a5","src/3ds.rs":"e41b653723740ed89fa68f495b64125786e8dec002e3556d164c5795db62ea50","src/apple-other.rs":"3ff0abc72786a2ac063cdc5df4d18cc53dc493cd68fcb33734cf40cfdbb8f644","src/bsd_arandom.rs":"cfa0627a6b4d1f37065d415972ab813bf1c9f43979d2ff9440c92a53868123aa","src/custom.rs":"16bdf3bd6fca0f370f42c217a1e8737c0c549a6820c7b61919d5a2d2bf675b48","src/dragonfly.rs":"047008e742a7a8050e61ed9626b9f4146dfaa0675e11d6f3680eb8af498d9a6d","src/emscripten.rs":"e0b3b44b52f54454ec3e0a9e7c5222003369d9d1575cc0652e3e7cbe1b3b6da7","src/error.rs":"ff09a7e02d7aff3e45eca6bbef6c686cc46f3c2371a0897a856e4dec4b942e46","src/error_impls.rs":"9c34832ebb99cd5e31bc5c8ffc5beb5b3fa6f7ff0226aaa1cdf8e10e6d64b324","src/espidf.rs":"915ca14cbf9299de51a3c67f34fdd252461d6545f33a7232dfb7fa247ccc0209","src/fuchsia.rs":"d307b15db9f2d67b43050ae6027779a6eb2b8a69e1e89931b55b767aa2622250","src/hermit.rs":"18fdd7917c73f8b16aa82b18003948d32f9b314da10e16ef9cd2fa077b17af00","src/js.rs":"c4cd60bcfe63f8affe947773197e288536ab205a73001059f39fc2e5688e98b6","src/lib.rs":"be7d74e035960cbea4b3a73afad8dec80449a50bf9d53895321e1ef04d064b45","src/linux_android.rs":"e5f9e579bbde254fcab8f6b79b893d6b74054e023b21c56a3b2b21d8f4b4d825","src/macos.rs":"8f51e095906e751b68e837bfc63cc02b243e1698b66353566ccba507c81ddad3","src/openbsd.rs":"f6fd0aa74f704335a7e0532bf5e61a7ca90b0cbc398a9c01a0fd891b6fabca0c","src/rdrand.rs":"846ac7b8380a05a50e0592dca57338beb1634c0efc878d6d1e9421be3469a744","src/solaris_illumos.rs":"7209c8b1172fc4df5ad8a79f165556b403cdd90b9eb5f7f7f9ec97bf06f4d8d7","src/solid.rs":"58919109faf06e6d546f75f785d78d
6c055e1f95110d1791d9191d1e404f1e20","src/use_file.rs":"ecfc1011b4a9c962ae9b4b75ca5149a4ee83cb0951a80224ce5417046ce11717","src/util.rs":"580fb7c4e41eb6007def8626e019829c22a63980fa4da68a1adef687c57953a2","src/util_libc.rs":"ee06b1d59eb70b11ec934cc2a6a4e271ecbe0610c32dea35c8d677caebf4c898","src/vita.rs":"ecfa9d347ad5c480ba8ff80a9de968ae060ffb435f1e95777ee413642e62e50a","src/vxworks.rs":"984726b6dd9638a38ceda83124683419b9d69a9041ad9117a470eaec5b386ce4","src/wasi.rs":"229a58af3f13a629571fb83a0c11ef0ed696ba7a44ee2e811c9f348a19b2fb69","src/windows.rs":"dd3d833979fb6b96c04b84dbf8461d5fc819bde93ad9dc26bd0f6c282656c733","tests/common/mod.rs":"b9a36043d71963ba43a9e2899ba8eea80ff9f3284d243d9b9b9f941afa4f4aa4","tests/custom.rs":"1e944ae523b62dba53fe3daf1b964a2498c8fdd21dfa7afe53781bff2fcf276e","tests/normal.rs":"9e1c4b1e468a09ed0225370dfb6608f8b8135e0fabb09bbc1a718105164aade6","tests/rdrand.rs":"156676b57f1e6bd4d66d85b8a999f1cf7a8fb749a10b8b2b4dbbcf803e8c4cd3"},"package":"be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"} \ No newline at end of file diff --git a/vendor/getrandom/CHANGELOG.md b/vendor/getrandom/CHANGELOG.md index 7b1f46a..1b57e18 100644 --- a/vendor/getrandom/CHANGELOG.md +++ b/vendor/getrandom/CHANGELOG.md @@ -4,18 +4,6 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [0.2.11] - 2023-11-08 -### Added -- GNU/Hurd support [#370] - -### Changed -- Renamed `__getrandom_internal` to `__GETRANDOM_INTERNAL` [#369] -- Updated link to Hermit docs [#374] - -[#369]: https://github.com/rust-random/getrandom/pull/369 -[#370]: https://github.com/rust-random/getrandom/pull/370 -[#374]: https://github.com/rust-random/getrandom/pull/374 - ## [0.2.10] - 2023-06-06 ### Added - Support for PS Vita (`armv7-sony-vita-newlibeabihf`) [#359] @@ -403,7 +391,6 @@ Publish initial implementation. ## [0.0.0] - 2019-01-19 Publish an empty template library. 
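For orientation, the entire public surface of this vendored crate is a single fill-this-buffer call; a minimal sketch of how a caller such as sysboostd might use it (the buffer size and the `random_seed` wrapper name are illustrative, not taken from this patch):

    use getrandom::getrandom;

    /// Fill a fixed-size seed buffer from the platform entropy source.
    fn random_seed() -> Result<[u8; 16], getrandom::Error> {
        let mut buf = [0u8; 16];
        getrandom(&mut buf)?; // on Linux this ends up in getrandom(2)
        Ok(buf)
    }
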
-[0.2.11]: https://github.com/rust-random/getrandom/compare/v0.2.10...v0.2.11 [0.2.10]: https://github.com/rust-random/getrandom/compare/v0.2.9...v0.2.10 [0.2.9]: https://github.com/rust-random/getrandom/compare/v0.2.8...v0.2.9 [0.2.8]: https://github.com/rust-random/getrandom/compare/v0.2.7...v0.2.8 diff --git a/vendor/getrandom/Cargo.toml b/vendor/getrandom/Cargo.toml index a4c3946..352bb02 100644 --- a/vendor/getrandom/Cargo.toml +++ b/vendor/getrandom/Cargo.toml @@ -12,7 +12,7 @@ [package] edition = "2018" name = "getrandom" -version = "0.2.11" +version = "0.2.10" authors = ["The Rand Project Developers"] exclude = [".*"] description = "A small cross-platform library for retrieving random data from system source" @@ -25,16 +25,6 @@ categories = [ license = "MIT OR Apache-2.0" repository = "https://github.com/rust-random/getrandom" -[package.metadata.cross.target.x86_64-unknown-netbsd] -pre-build = [ - "mkdir -p /tmp/netbsd", - "curl https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.2/amd64/binary/sets/base.tar.xz -O", - "tar -C /tmp/netbsd -xJf base.tar.xz", - "cp /tmp/netbsd/usr/lib/libexecinfo.so /usr/local/x86_64-unknown-netbsd/lib", - "rm base.tar.xz", - "rm -rf /tmp/netbsd", -] - [package.metadata.docs.rs] features = [ "std", @@ -90,5 +80,5 @@ version = "0.11" default-features = false [target."cfg(unix)".dependencies.libc] -version = "0.2.149" +version = "0.2.143" default-features = false diff --git a/vendor/getrandom/README.md b/vendor/getrandom/README.md index c43ad42..404b383 100644 --- a/vendor/getrandom/README.md +++ b/vendor/getrandom/README.md @@ -3,7 +3,7 @@ [![Build Status]][GitHub Actions] [![Crate]][crates.io] [![Documentation]][docs.rs] [![Dependency Status]][deps.rs] [![Downloads]][crates.io] [![License]][LICENSE-MIT] [GitHub Actions]: https://github.com/rust-random/getrandom/actions?query=workflow:Tests+branch:master -[Build Status]: https://github.com/rust-random/getrandom/actions/workflows/tests.yml/badge.svg?branch=master +[Build Status]: https://github.com/rust-random/getrandom/workflows/Tests/badge.svg?branch=master [crates.io]: https://crates.io/crates/getrandom [Crate]: https://img.shields.io/crates/v/getrandom [docs.rs]: https://docs.rs/getrandom diff --git a/vendor/getrandom/src/custom.rs b/vendor/getrandom/src/custom.rs index 66e4256..c9207d0 100644 --- a/vendor/getrandom/src/custom.rs +++ b/vendor/getrandom/src/custom.rs @@ -77,7 +77,7 @@ use core::{mem::MaybeUninit, num::NonZeroU32}; macro_rules! register_custom_getrandom { ($path:path) => { // TODO(MSRV 1.37): change to unnamed block - const __GETRANDOM_INTERNAL: () = { + const __getrandom_internal: () = { // We use Rust ABI to be safe against potential panics in the passed function. #[no_mangle] unsafe fn __getrandom_custom(dest: *mut u8, len: usize) -> u32 { diff --git a/vendor/getrandom/src/hurd.rs b/vendor/getrandom/src/hurd.rs deleted file mode 100644 index 842b9bc..0000000 --- a/vendor/getrandom/src/hurd.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2021 Developers of the Rand project. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
Implementation for GNU/Hurd
-use crate::util_libc::sys_fill_exact;
-use crate::Error;
-use core::mem::MaybeUninit;
-
-pub fn getrandom_inner(dest: &mut [MaybeUninit<u8>]) -> Result<(), Error> {
-    sys_fill_exact(dest, |buf| unsafe {
-        libc::getrandom(buf.as_mut_ptr() as *mut libc::c_void, buf.len(), 0)
-    })
-}
diff --git a/vendor/getrandom/src/lib.rs b/vendor/getrandom/src/lib.rs
index 10cc227..cd8a960 100644
--- a/vendor/getrandom/src/lib.rs
+++ b/vendor/getrandom/src/lib.rs
@@ -25,7 +25,6 @@
 //! | Redox | `*‑redox` | `/dev/urandom`
 //! | Haiku | `*‑haiku` | `/dev/urandom` (identical to `/dev/random`)
 //! | Hermit | `*-hermit` | [`sys_read_entropy`]
-//! | Hurd | `*-hurd-*` | [`getrandom`][17]
 //! | SGX | `x86_64‑*‑sgx` | [`RDRAND`]
 //! | VxWorks | `*‑wrs‑vxworks‑*` | `randABytes` after checking entropy pool initialization with `randSecure`
 //! | ESP-IDF | `*‑espidf` | [`esp_fill_random`]
@@ -167,7 +166,6 @@
 //! [14]: https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.utilities/topic/r/random.html
 //! [15]: https://www.ibm.com/docs/en/aix/7.3?topic=files-random-urandom-devices
 //! [16]: https://man.netbsd.org/getrandom.2
-//! [17]: https://www.gnu.org/software/libc/manual/html_mono/libc.html#index-getrandom
 //!
 //! [`BCryptGenRandom`]: https://docs.microsoft.com/en-us/windows/win32/api/bcrypt/nf-bcrypt-bcryptgenrandom
 //! [`Crypto.getRandomValues`]: https://www.w3.org/TR/WebCryptoAPI/#Crypto-method-getRandomValues
@@ -182,12 +180,12 @@
 //! [`module`]: https://rustwasm.github.io/wasm-bindgen/reference/attributes/on-js-imports/module.html
 //! [CommonJS modules]: https://nodejs.org/api/modules.html
 //! [ES modules]: https://nodejs.org/api/esm.html
-//! [`sys_read_entropy`]: https://github.com/hermit-os/kernel/blob/315f58ff5efc81d9bf0618af85a59963ff55f8b1/src/syscalls/entropy.rs#L47-L55
+//! [`sys_read_entropy`]: https://hermitcore.github.io/libhermit-rs/hermit/fn.sys_read_entropy.html
 
 #![doc(
     html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
     html_favicon_url = "https://www.rust-lang.org/favicon.ico",
-    html_root_url = "https://docs.rs/getrandom/0.2.11"
+    html_root_url = "https://docs.rs/getrandom/0.2.10"
 )]
 #![no_std]
 #![warn(rust_2018_idioms, unused_lifetimes, missing_docs)]
@@ -280,9 +278,6 @@ cfg_if! {
                         any(target_arch = "wasm32", target_arch = "wasm64"),
                         target_os = "unknown"))] {
         #[path = "js.rs"] mod imp;
-    } else if #[cfg(target_os = "hurd")] {
-        mod util_libc;
-        #[path = "hurd.rs"] mod imp;
     } else if #[cfg(feature = "custom")] {
         use custom as imp;
     } else if #[cfg(all(any(target_arch = "wasm32", target_arch = "wasm64"),
diff --git a/vendor/getrandom/src/util_libc.rs b/vendor/getrandom/src/util_libc.rs
index 99bee38..4b94144 100644
--- a/vendor/getrandom/src/util_libc.rs
+++ b/vendor/getrandom/src/util_libc.rs
@@ -19,7 +19,7 @@ use libc::c_void;
 cfg_if!
{ if #[cfg(any(target_os = "netbsd", target_os = "openbsd", target_os = "android"))] { use libc::__errno as errno_location; - } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "hurd", target_os = "redox"))] { + } else if #[cfg(any(target_os = "linux", target_os = "emscripten", target_os = "redox"))] { use libc::__errno_location as errno_location; } else if #[cfg(any(target_os = "solaris", target_os = "illumos"))] { use libc::___errno as errno_location; diff --git a/vendor/ordered-multimap/.cargo-checksum.json b/vendor/ordered-multimap/.cargo-checksum.json index e28e2c5..641f18f 100644 --- a/vendor/ordered-multimap/.cargo-checksum.json +++ b/vendor/ordered-multimap/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"addf5766dd985eecd129abf122eee8fdbb0c490f365edb9254aa0f9195895d7c","CONTRIBUTING.md":"c67c1ce7c5f0d1aa5009db0ba42c6a32da2e2d5a6b517f38c0308d8ffa5ae083","Cargo.toml":"60ed14a557a877cbf8cee01f1e76541f094d5c5e875a4ff5d2b824b20c10b69a","LICENSE":"047c1d2f1c28c30ced89bd0740ff251d8f51512e81b142711f958a0551729ec4","README.md":"71553048ddfb5a2ba5817c4c8bea138b83bc75c953d289637110bde93e4f2125","codecov.yml":"550982ef37ab56e6c3e06351f359f2407855a54c27f7e9c7871b855aa34c9109","rustfmt.toml":"9d197f8ce3b24c6aa98627d614420d5291fde7c5442cf77a7f8718dc9375f361","src/lib.rs":"357fa15e37bb947d61acb8224fcaddc23087cf88ae96b730966898fc9cedfaed","src/list_ordered_multimap.rs":"dee8047febf734701197c7aa5220bdb6ff33a63d1f515263e982e565842c76b1","src/serde.rs":"aa0db90076cc20dffe26677e8fa00437dc3fc84b9cf38b469836f99a1b010b89"},"package":"4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e"} \ No newline at end of file +{"files":{"CHANGELOG.md":"405aea42de92645437aee5a58581905030d0f6c4192343e8525a8e14e1b77dbc","CONTRIBUTING.md":"c67c1ce7c5f0d1aa5009db0ba42c6a32da2e2d5a6b517f38c0308d8ffa5ae083","Cargo.toml":"96e5b52351048e6e1d1dd05629b1490bcc730bff4147ef02baee69554b503996","LICENSE":"047c1d2f1c28c30ced89bd0740ff251d8f51512e81b142711f958a0551729ec4","README.md":"71553048ddfb5a2ba5817c4c8bea138b83bc75c953d289637110bde93e4f2125","codecov.yml":"550982ef37ab56e6c3e06351f359f2407855a54c27f7e9c7871b855aa34c9109","rustfmt.toml":"9d197f8ce3b24c6aa98627d614420d5291fde7c5442cf77a7f8718dc9375f361","src/lib.rs":"cd27ce73db974c556ecf95395449fcb7648bb248391dbaf0035c85adc6124c78","src/list_ordered_multimap.rs":"b3daf42c0a14aa3253bd8dd726ca7291781b380c463051f4a9061596e0516a56","src/serde.rs":"aa0db90076cc20dffe26677e8fa00437dc3fc84b9cf38b469836f99a1b010b89"},"package":"a4d6a8c22fc714f0c2373e6091bf6f5e9b37b1bc0b1184874b7e0a4e303d318f"} \ No newline at end of file diff --git a/vendor/ordered-multimap/CHANGELOG.md b/vendor/ordered-multimap/CHANGELOG.md index 1e7d092..4968053 100644 --- a/vendor/ordered-multimap/CHANGELOG.md +++ b/vendor/ordered-multimap/CHANGELOG.md @@ -7,6 +7,18 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), ## [Unreleased] +# 0.7.1 - 10-24-2023 + +### Changed + + - Updated `coverage-helper` dependency to `0.2.0`. 
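The list_ordered_multimap.rs hunks below replace the crate's private `hash_key` helper with the standard `BuildHasher::hash_one`, stable since Rust 1.71. A minimal sketch of the equivalence, using `std`'s `RandomState` (the free function here mirrors the helper removed below):

    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hash, Hasher};

    // What the removed helper did, spelled out step by step.
    fn hash_key<Q: ?Sized + Hash, S: BuildHasher>(state: &S, key: &Q) -> u64 {
        let mut hasher = state.build_hasher();
        key.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        let state = RandomState::new();
        // hash_one folds build_hasher/hash/finish into a single call.
        assert_eq!(hash_key(&state, "key1"), state.hash_one("key1"));
    }
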
+
+# 0.7.0 - 08-23-2023
+
+### Changed
+
+ - Updated `hashbrown` dependency to `0.14.0`
+
 # 0.6.0 - 01-21-2023
 
 ### Added
diff --git a/vendor/ordered-multimap/Cargo.toml b/vendor/ordered-multimap/Cargo.toml
index cdc196c..08c9944 100644
--- a/vendor/ordered-multimap/Cargo.toml
+++ b/vendor/ordered-multimap/Cargo.toml
@@ -12,7 +12,7 @@
 [package]
 edition = "2021"
 name = "ordered-multimap"
-version = "0.6.0"
+version = "0.7.1"
 authors = ["Scott Godwin <sgodwincs@gmail.com>"]
 description = "Insertion ordered multimap"
 readme = "README.md"
@@ -25,7 +25,7 @@
 version = "0.5"
 default-features = false
 
 [dependencies.hashbrown]
-version = "0.13.2"
+version = "0.14.0"
 default-features = false
 
 [dependencies.serde]
@@ -34,7 +34,7 @@ optional = true
 default-features = false
 
 [dev-dependencies.coverage-helper]
-version = "0.1.0"
+version = "0.2.0"
 
 [dev-dependencies.serde_test]
 version = "1.0.144"
diff --git a/vendor/ordered-multimap/src/lib.rs b/vendor/ordered-multimap/src/lib.rs
index 06244a9..782f1db 100644
--- a/vendor/ordered-multimap/src/lib.rs
+++ b/vendor/ordered-multimap/src/lib.rs
@@ -3,7 +3,7 @@
 //!
 //! See the type documentation for more information.
 
-#![cfg_attr(coverage_nightly, feature(no_coverage))]
+#![cfg_attr(coverage_nightly, feature(coverage_attribute))]
 #![cfg_attr(not(feature = "std"), no_std)]
 
 extern crate alloc;
diff --git a/vendor/ordered-multimap/src/list_ordered_multimap.rs b/vendor/ordered-multimap/src/list_ordered_multimap.rs
index 08235a6..3353192 100644
--- a/vendor/ordered-multimap/src/list_ordered_multimap.rs
+++ b/vendor/ordered-multimap/src/list_ordered_multimap.rs
@@ -697,9 +697,8 @@ where
   /// assert_eq!(map.values_len(), 2);
   /// ```
   pub fn append(&mut self, key: Key, value: Value) -> bool {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(&key);
     let entry = raw_entry_mut(&self.keys, &mut self.map, hash, &key);
-    let build_hasher = &self.build_hasher;
 
     match entry {
       RawEntryMut::Occupied(mut entry) => {
@@ -723,7 +722,7 @@ where
         let keys = &self.keys;
         let _ = entry.insert_with_hasher(hash, key_index, MapEntry::new(index), |&key_index| {
           let key = keys.get(key_index).unwrap();
-          hash_key(build_hasher, key)
+          self.build_hasher.hash_one(key)
         });
         false
       }
@@ -750,7 +749,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
     raw_entry(&self.keys, &self.map, hash, key).is_some()
   }
 
@@ -770,7 +769,7 @@ where
   /// ```
   #[must_use]
   pub fn entry(&mut self, key: Key) -> Entry<'_, Key, Value, State> {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(&key);
 
     // TODO: This ugliness arises from borrow checking issues which seems to happen when the vacant entry is created in
     // the match block further below for `Vacant` even though it should be perfectly safe. Is there a better way to do
@@ -820,7 +819,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
 
     match raw_entry(&self.keys, &self.map, hash, key) {
       Some((_, map_entry)) => map_entry.length,
@@ -848,7 +847,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
     let (_, map_entry) = raw_entry(&self.keys, &self.map, hash, key)?;
     self
       .values
@@ -881,7 +880,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
 
     match raw_entry(&self.keys, &self.map, hash, key) {
       Some((_, map_entry)) => EntryValues::from_map_entry(&self.values, map_entry),
@@ -919,7 +918,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
 
     match raw_entry(&self.keys, &self.map, hash, key) {
       Some((_, map_entry)) => EntryValuesMut::from_map_entry(&mut self.values, map_entry),
@@ -954,7 +953,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
     let (_, map_entry) = raw_entry(&self.keys, &self.map, hash, key)?;
     self
       .values
@@ -1029,9 +1028,8 @@ where
   /// assert_eq!(map.get(&"key"), Some(&"value3"));
   /// ```
   pub fn insert_all(&mut self, key: Key, value: Value) -> EntryValuesDrain<'_, Key, Value> {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(&key);
     let entry = raw_entry_mut(&self.keys, &mut self.map, hash, &key);
-    let build_hasher = &self.build_hasher;
 
     match entry {
       RawEntryMut::Occupied(mut entry) => {
@@ -1050,7 +1048,7 @@ where
         let keys = &self.keys;
         let _ = entry.insert_with_hasher(hash, key_index, MapEntry::new(index), |&key_index| {
           let key = keys.get(key_index).unwrap();
-          hash_key(build_hasher, key)
+          self.build_hasher.hash_one(key)
         });
         EntryValuesDrain::empty(&mut self.values)
       }
@@ -1103,7 +1101,6 @@ where
     let key_map = self.keys.pack_to(keys_minimum_capacity);
     let value_map = self.values.pack_to(values_minimum_capacity);
     let mut map = HashMap::with_capacity_and_hasher(keys_minimum_capacity, DummyState);
-    let build_hasher = &self.build_hasher;
 
     for value_entry in self.values.iter_mut() {
       value_entry.key_index = key_map[&value_entry.key_index];
@@ -1116,14 +1113,14 @@ where
       map_entry.tail_index = value_map[&map_entry.tail_index];
       let key_index = key_map[&key_index];
       let key = self.keys.get(key_index).unwrap();
-      let hash = hash_key(&self.build_hasher, key);
+      let hash = self.build_hasher.hash_one(key);
 
       match map.raw_entry_mut().from_hash(hash, |_| false) {
         RawEntryMut::Vacant(entry) => {
           let keys = &self.keys;
           let _ = entry.insert_with_hasher(hash, key_index, map_entry, |&key_index| {
             let key = keys.get(key_index).unwrap();
-            hash_key(build_hasher, key)
+            self.build_hasher.hash_one(key)
           });
         }
         _ => panic!("expected vacant entry"),
@@ -1199,7 +1196,7 @@ where
     let key_wrapper = match value_entry.previous_index {
       Some(previous_index) => {
         let key = self.keys.get(value_entry.key_index).unwrap();
-        let hash = hash_key(&self.build_hasher, &key);
+        let hash = self.build_hasher.hash_one(key);
 
         let mut entry = match raw_entry_mut(&self.keys, &mut self.map, hash, key) {
           RawEntryMut::Occupied(entry) => entry,
@@ -1216,7 +1213,7 @@ where
       }
       None => {
         let key = self.keys.remove(value_entry.key_index).unwrap();
-        let hash = hash_key(&self.build_hasher, &key);
+        let hash = self.build_hasher.hash_one(&key);
 
         match raw_entry_mut_empty(&self.keys, &mut self.map, hash) {
           RawEntryMut::Occupied(entry) => {
@@ -1268,7 +1265,7 @@ where
     let key_wrapper = match value_entry.next_index {
       Some(next_index) => {
         let key = self.keys.get(value_entry.key_index).unwrap();
-        let hash = hash_key(&self.build_hasher, &key);
+        let hash = self.build_hasher.hash_one(key);
 
         let mut entry = match raw_entry_mut(&self.keys, &mut self.map, hash, key) {
           RawEntryMut::Occupied(entry) => entry,
@@ -1285,7 +1282,7 @@ where
       }
       None => {
         let key = self.keys.remove(value_entry.key_index).unwrap();
-        let hash = hash_key(&self.build_hasher, &key);
+        let hash = self.build_hasher.hash_one(&key);
 
         match raw_entry_mut_empty(&self.keys, &mut self.map, hash) {
           RawEntryMut::Occupied(entry) => {
@@ -1366,7 +1363,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
     let entry = raw_entry_mut(&self.keys, &mut self.map, hash, key);
 
     match entry {
@@ -1452,7 +1449,7 @@ where
     Key: Borrow<KeyQuery>,
     KeyQuery: ?Sized + Eq + Hash,
   {
-    let hash = hash_key(&self.build_hasher, &key);
+    let hash = self.build_hasher.hash_one(key);
     let entry = raw_entry_mut(&self.keys, &mut self.map, hash, key);
 
     match entry {
@@ -1497,7 +1494,7 @@ where
     for (key_index, map_entry) in self.map.drain() {
       let key = self.keys.get(key_index).unwrap();
-      let hash = hash_key(&self.build_hasher, key);
+      let hash = self.build_hasher.hash_one(key);
       let entry = match raw_entry_mut(&self.keys, &mut map, hash, key) {
         RawEntryMut::Vacant(entry) => entry,
         _ => panic!("expected vacant entry"),
@@ -1563,7 +1560,7 @@ where
       if function(key, &mut value_entry.value) {
         true
       } else {
-        let hash = hash_key(build_hasher, key);
+        let hash = build_hasher.hash_one(key);
         let mut entry = match raw_entry_mut(keys, map, hash, key) {
           RawEntryMut::Occupied(entry) => entry,
           _ => panic!("expected occupied entry in internal map"),
@@ -2478,7 +2475,6 @@ where
   /// assert_eq!(entry.insert("value"), &"value");
   /// ```
   pub fn insert(self, value: Value) -> &'map mut Value {
-    let build_hasher = self.build_hasher;
     let entry = match raw_entry_mut(self.keys, self.map, self.hash, &self.key) {
       RawEntryMut::Vacant(entry) => entry,
       _ => panic!("expected vacant entry"),
@@ -2490,7 +2486,7 @@ where
     let keys = &self.keys;
     let _ = entry.insert_with_hasher(self.hash, key_index, map_entry, |&key_index| {
       let key = keys.get(key_index).unwrap();
-      hash_key(build_hasher, key)
+      self.build_hasher.hash_one(key)
     });
 
     &mut self.values.get_mut(index).unwrap().value
@@ -2513,7 +2509,6 @@ where
   /// assert_eq!(entry.get(), &"value");
   /// ```
   pub fn insert_entry(self, value: Value) -> OccupiedEntry<'map, Key, Value> {
-    let build_hasher = self.build_hasher;
     let entry = match raw_entry_mut(self.keys, self.map, self.hash, &self.key) {
       RawEntryMut::Vacant(entry) => entry,
       _ => panic!("expected vacant entry"),
@@ -2525,7 +2520,7 @@ where
     let keys = &self.keys;
     let _ = entry.insert_with_hasher(self.hash, key_index, map_entry, |&key_index| {
       let key = keys.get(key_index).unwrap();
-      hash_key(build_hasher, key)
+      self.build_hasher.hash_one(key)
     });
 
     let key = self.keys.get(key_index).unwrap();
@@ -3166,7 +3161,7 @@ where
 {
   fn next_back(&mut self) -> Option<Self::Item> {
     let key = self.iter.next_back()?;
-    let hash = hash_key(self.build_hasher, key);
+    let hash = self.build_hasher.hash_one(key);
     let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap();
     let iter = EntryValues::from_map_entry(self.values, map_entry);
     Some((key, iter))
@@ -3196,7 +3191,7 @@ where
   fn next(&mut self) -> Option<Self::Item> {
     let key = self.iter.next()?;
-    let hash = hash_key(self.build_hasher, key);
+    let hash = self.build_hasher.hash_one(key);
     let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap();
     let iter = EntryValues::from_map_entry(self.values, map_entry);
     Some((key, iter))
@@ -3260,7 +3255,7 @@ where
 {
   fn next_back(&mut self) -> Option<Self::Item> {
     let key = self.iter.next_back()?;
-    let hash = hash_key(self.build_hasher, key);
+    let hash = self.build_hasher.hash_one(key);
     let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap();
     let iter = EntryValuesMut::from_map_entry(unsafe { &mut *self.values }, map_entry);
     Some((key, iter))
@@ -3290,7 +3285,7 @@ where
   fn next(&mut self) -> Option<Self::Item> {
     let key = self.iter.next()?;
-    let hash = hash_key(self.build_hasher, key);
+    let hash = self.build_hasher.hash_one(key);
     let (_, map_entry) = raw_entry(self.keys, self.map, hash, key).unwrap();
     let iter = EntryValuesMut::from_map_entry(unsafe { &mut *self.values }, map_entry);
     Some((key, iter))
@@ -3474,18 +3469,6 @@ impl Hasher for DummyHasher {
   }
 }
 
-/// Computes the hash value of the given key.
-#[must_use]
-fn hash_key<KeyQuery, State>(state: &State, key: &KeyQuery) -> u64
-where
-  KeyQuery: ?Sized + Eq + Hash,
-  State: BuildHasher,
-{
-  let mut hasher = state.build_hasher();
-  key.hash(&mut hasher);
-  hasher.finish()
-}
-
 #[must_use]
 fn raw_entry<'map, Key, KeyQuery, Value, State>(
   keys: &VecList<Key>,
   map: &'map HashMap<usize, MapEntry<Key, Value>, DummyState>,
@@ -3587,7 +3570,7 @@ mod test {
     let mut map = ListOrderedMultimap::with_hasher(TestBuildHasher);
     let state = map.hasher();
-    assert_eq!(hash_key(state, "key1"), hash_key(state, "key2"));
+    assert_eq!(state.hash_one("key1"), state.hash_one("key2"));
 
     map.insert("key1", "value1");
     assert_eq!(map.get(&"key1"), Some(&"value1"));
@@ -3599,8 +3582,8 @@ mod test {
   #[test]
   fn test_no_collision() {
     let state = RandomState::new();
-    let hash_1 = hash_key(&state, "key1");
-    let hash_2 = hash_key(&state, "key2");
+    let hash_1 = state.hash_one("key1");
+    let hash_2 = state.hash_one("key2");
 
     assert!(hash_1 != hash_2);
   }
@@ -4519,7 +4502,11 @@ mod test {
   #[test]
   fn test_list_ordered_multimap_extend() {
     let mut map = ListOrderedMultimap::new();
-    map.extend(vec![("key1", "value1"), ("key2", "value2"), ("key2", "value3")].into_iter());
+    map.extend(vec![
+      ("key1", "value1"),
+      ("key2", "value2"),
+      ("key2", "value3"),
+    ]);
 
     let mut iter = map.get_all(&"key1");
     assert_eq!(iter.next(), Some(&"value1"));
@@ -4531,7 +4518,7 @@ mod test {
     assert_eq!(iter.next(), None);
 
     let mut map = ListOrderedMultimap::new();
-    map.extend(vec![(&1, &1), (&2, &1), (&2, &2)].into_iter());
+    map.extend(vec![(&1, &1), (&2, &1), (&2, &2)]);
 
     let mut iter = map.get_all(&1);
     assert_eq!(iter.next(), Some(&1));
@@ -4545,9 +4532,11 @@ mod test {
   #[test]
   fn test_list_ordered_multimap_from_iterator() {
-    let map: ListOrderedMultimap<_, _, RandomState> = ListOrderedMultimap::from_iter(
-      vec![("key1", "value1"), ("key2", "value2"), ("key2", "value3")].into_iter(),
-    );
+    let map: ListOrderedMultimap<_, _, RandomState> = ListOrderedMultimap::from_iter(vec![
+      ("key1", "value1"),
+      ("key2", "value2"),
+      ("key2", "value3"),
+    ]);
 
     let mut iter = map.get_all(&"key1");
     assert_eq!(iter.next(), Some(&"value1"));
diff --git a/vendor/rust-ini/README.rst b/vendor/rust-ini/README.rst
deleted file mode 100644
index 1881853..0000000
--- a/vendor/rust-ini/README.rst
+++ /dev/null
@@ -1,114 +0,0 @@
-INI in Rust
------------
-
-.. image:: https://github.com/zonyitoo/rust-ini/actions/workflows/build-and-test.yml/badge.svg
-   :target: https://github.com/zonyitoo/rust-ini/actions/workflows/build-and-test.yml
-
-.. image:: https://img.shields.io/crates/v/rust-ini.svg
-   :target: https://crates.io/crates/rust-ini
-
-.. image:: https://docs.rs/rust-ini/badge.svg
-   :target: https://docs.rs/rust-ini
-
-INI_ is an informal standard for configuration files for some platforms or software. INI files are simple text files with a basic structure composed of "sections" and "properties".
-
-.. _INI: http://en.wikipedia.org/wiki/INI_file
-
-This is an INI file parser in Rust_.
-
-.. _Rust: http://www.rust-lang.org/
-
-.. code:: toml
-
-    [dependencies]
-    rust-ini = "0.19"
-
-Usage
-=====
-
-* Create a Ini configuration file.
-
-.. code:: rust
-
-    extern crate ini;
-    use ini::Ini;
-
-    fn main() {
-        let mut conf = Ini::new();
-        conf.with_section(None::<String>)
-            .set("encoding", "utf-8");
-        conf.with_section(Some("User"))
-            .set("given_name", "Tommy")
-            .set("family_name", "Green")
-            .set("unicode", "Raspberry树莓");
-        conf.with_section(Some("Book"))
-            .set("name", "Rust cool");
-        conf.write_to_file("conf.ini").unwrap();
-    }
-
-Then you will get ``conf.ini``
-
-.. code:: ini
-
-    encoding=utf-8
-
-    [User]
-    given_name=Tommy
-    family_name=Green
-    unicode=Raspberry\x6811\x8393
-
-    [Book]
-    name=Rust cool
-
-* Read from file ``conf.ini``
-
-.. code:: rust
-
-    extern crate ini;
-    use ini::Ini;
-
-    fn main() {
-        let conf = Ini::load_from_file("conf.ini").unwrap();
-
-        let section = conf.section(Some("User")).unwrap();
-        let tommy = section.get("given_name").unwrap();
-        let green = section.get("family_name").unwrap();
-
-        println!("{:?} {:?}", tommy, green);
-
-        // iterating
-        for (sec, prop) in &conf {
-            println!("Section: {:?}", sec);
-            for (key, value) in prop.iter() {
-                println!("{:?}:{:?}", key, value);
-            }
-        }
-    }
-
-* More details could be found in `examples`.
-
-License
-=======
-
-`The MIT License (MIT)`_
-
-.. _The MIT License (MIT): https://opensource.org/licenses/MIT
-
-Copyright (c) 2014 Y. T. CHUNG <zonyitoo@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
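The README removed above demonstrated borrowed iteration (`for (sec, prop) in &conf`); the `IntoIterator for Ini` impl added earlier in this series also supports consuming iteration. A short sketch (the section and key names are illustrative):

    use ini::Ini;

    fn main() {
        let mut conf = Ini::new();
        conf.with_section(Some("User")).set("name", "Tommy");

        // Owned iteration: yields (SectionKey, Properties) pairs in insertion
        // order, where SectionKey is Option<String> (None is the general section).
        for (sec, props) in conf {
            println!("Section: {:?}", sec);
            for (key, value) in props.iter() {
                println!("{}={}", key, value);
            }
        }
    }
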
-- Gitee From f6676f60d9bb33549ae4b3606195163fa5d3f3b2 Mon Sep 17 00:00:00 2001 From: yangpan Date: Fri, 5 Jan 2024 10:38:04 +0800 Subject: [PATCH 5/6] =?UTF-8?q?=E4=BF=AE=E6=94=B9vendor--4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- vendor/hashbrown-0.12.3/.cargo-checksum.json | 1 + vendor/hashbrown-0.12.3/CHANGELOG.md | 402 ++++++++++++++++++ vendor/hashbrown-0.12.3/LICENSE-APACHE | 201 +++++++++ vendor/hashbrown-0.12.3/LICENSE-MIT | 25 ++ vendor/hashbrown-0.12.3/README.md | 126 ++++++ vendor/hashbrown-0.12.3/benches/bench.rs | 331 ++++++++++++++ .../benches/insert_unique_unchecked.rs | 32 ++ vendor/hashbrown-0.12.3/clippy.toml | 1 + .../src/external_trait_impls/mod.rs | 4 + 9 files changed, 1123 insertions(+) create mode 100644 vendor/hashbrown-0.12.3/.cargo-checksum.json create mode 100644 vendor/hashbrown-0.12.3/CHANGELOG.md create mode 100644 vendor/hashbrown-0.12.3/LICENSE-APACHE create mode 100644 vendor/hashbrown-0.12.3/LICENSE-MIT create mode 100644 vendor/hashbrown-0.12.3/README.md create mode 100644 vendor/hashbrown-0.12.3/benches/bench.rs create mode 100644 vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs create mode 100644 vendor/hashbrown-0.12.3/clippy.toml create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs diff --git a/vendor/hashbrown-0.12.3/.cargo-checksum.json b/vendor/hashbrown-0.12.3/.cargo-checksum.json new file mode 100644 index 0000000..5561cde --- /dev/null +++ b/vendor/hashbrown-0.12.3/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"CHANGELOG.md":"ade49a29d368e16ce508aee91b477ecbad7e2e52eb6fee7b4c1fc86199963f0e","Cargo.toml":"421b3a71d97faf0a7e52c3b2bfbe0f1c036b9dbf6232b4e5b41221bb54358f5a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"a536b3bb3f3521e59836080f05a4783150fa8484f759a31468ce3b6dba1f33eb","benches/bench.rs":"aadc39d815eadf094ed9357d946319df2d93194203bbccb7c33cea6951d654df","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"d69528827794524cfd9acbeacc1ac4f6131e3c7574311e6d919f818f65fbff07","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"2809e2a0071db8101c38789deb955f3830c5c3455eb1794ff64a0cf2ceb53fc7","src/external_trait_impls/rayon/mod.rs":"156de9c1ad0123334ea3b7e5a17444faf1b8bf971aa88a1f23e2f2d1c3021141","src/external_trait_impls/rayon/raw.rs":"e62c5f3ca5fffea47357e64b6f8c34cec94af62d9bd28a2b87934da46c22b66e","src/external_trait_impls/rayon/set.rs":"c4c44d44e56c2f59e9e1355662e29d8744ac96645ca4414127a359fb46cb0fbf","src/external_trait_impls/serde.rs":"0bc1a1f218d1ae7a5262557a5e3737b9334caf7d50c136dbdc75ff75680c223b","src/lib.rs":"c82fbee9684bfff40ef55d5f0c9f855c11f71f9fd1720fb084ef8331bdbc41d8","src/macros.rs":"36fe532656879c80f7753d13354b889f5b45caef451a1bb3a27dbc32d74c9878","src/map.rs":"df39edae67c569378dea9a4d928685cb4d06569712c6ac36a54df76fb5d87fe3","src/raw/alloc.rs":"184a0345bc2c7544b65c28724063be26b1f2b28dbaaa028a0b01192ccac25557","src/raw/bitmask.rs":"820d90b19b7e3433a1048ace008c9526331cd53a576cb0cfc1ff9960b6fe52f8","src/raw/generic.rs":"f5013a50d6d82d5cc8bad8b8c26c24d00fa810197f9f123256c58ac92e0d98f9","src/raw/mod.rs":"fa38247c6b3bd70636be50400debb9966a3446d49ee13
e4f4e2dfe4ceed1b201","src/raw/sse2.rs":"838cfdb1daa1e70951ed25f985283b8b7ab4b46fa130f92eda152047ce6086f6","src/rustc_entry.rs":"cdd70972cba5b79ca1cad79869cb5e184d6dc4798ef90822e966ef89679ba011","src/scopeguard.rs":"d13de1b12897add7fe1c3eba6f906c9cc09d86509b6cfe06b95d63803fe9265c","src/set.rs":"6877d4a42eeadd681e3b8881528e4b20f14cfedbc11e9318bfcf425ef96d1546","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/rayon.rs":"83d5289771542203f539a41cccb889fbe7ce70f5adf5b903ac9f051e3ba13cfa","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"01cf39efb04646ef4c63a809ebb96dfa63cfec472bf8bdb6c121f6526d40c40e"},"package":"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"} \ No newline at end of file diff --git a/vendor/hashbrown-0.12.3/CHANGELOG.md b/vendor/hashbrown-0.12.3/CHANGELOG.md new file mode 100644 index 0000000..3354b54 --- /dev/null +++ b/vendor/hashbrown-0.12.3/CHANGELOG.md @@ -0,0 +1,402 @@ +# Change Log + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/) +and this project adheres to [Semantic Versioning](https://semver.org/). + +## [Unreleased] + +## [v0.12.3] - 2022-07-17 + +## Fixed + +- Fixed double-drop in `RawTable::clone_from`. (#348) + +## [v0.12.2] - 2022-07-09 + +## Added + +- Added `Entry` API for `HashSet`. (#342) +- Added `Extend<&'a (K, V)> for HashMap`. (#340) +- Added length-based short-circuiting for hash table iteration. (#338) +- Added a function to access the `RawTable` of a `HashMap`. (#335) + +## Changed + +- Edited `do_alloc` to reduce LLVM IR generated. (#341) + +## [v0.12.1] - 2022-05-02 + +## Fixed + +- Fixed underflow in `RawIterRange::size_hint`. (#325) +- Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325) + +## [v0.12.0] - 2022-01-17 + +## Added + +- Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297) +- Added an `allocator()` getter to HashMap and HashSet. (#257) +- Added `insert_unique_unchecked` to `HashMap` and `HashSet`. (#293) +- Added `into_keys` and `into_values` to HashMap. (#295) +- Implement `From` on `HashSet` and `HashMap`. (#298) +- Added `entry_ref` API to `HashMap`. (#201) + +## Changed + +- Bumped minimum Rust version to 1.56.1 and edition to 2021. +- Use u64 for the GroupWord on WebAssembly. (#271) +- Optimized `find`. (#279) +- Made rehashing and resizing less generic to reduce compilation time. (#282) +- Inlined small functions. (#283) +- Use `BuildHasher::hash_one` when `feature = "nightly"` is enabled. (#292) +- Relaxed the bounds on `Debug` for `HashSet`. (#296) +- Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291) +- Don't hash the key when searching in an empty table. (#305) + +## Fixed + +- Guard against allocations exceeding isize::MAX. (#268) +- Made `RawTable::insert_no_grow` unsafe. (#254) +- Inline `static_empty`. (#280) +- Fixed trait bounds on Send/Sync impls. (#303) + +## [v0.11.2] - 2021-03-25 + +## Fixed + +- Added missing allocator type parameter to `HashMap`'s and `HashSet`'s `Clone` impls. (#252) + +## [v0.11.1] - 2021-03-20 + +## Fixed + +- Added missing `pub` modifier to `BumpWrapper`. (#251) + +## [v0.11.0] - 2021-03-14 + +## Added +- Added safe `try_insert_no_grow` method to `RawTable`. (#229) +- Added support for `bumpalo` as an allocator without the `nightly` feature. (#231) +- Implemented `Default` for `RawTable`. 
(#237)
+- Added new safe methods `RawTable::get_each_mut`, `HashMap::get_each_mut`, and
+  `HashMap::get_each_key_value_mut`. (#239)
+- Added `From<HashMap<T, (), S>>` for `HashSet<T, S>`. (#235)
+- Added `try_insert` method to `HashMap`. (#247)
+
+## Changed
+- The minimum Rust version has been bumped to 1.49.0. (#230)
+- Significantly improved compilation times by reducing the amount of generated IR. (#205)
+
+## Removed
+- We no longer re-export the unstable allocator items from the standard library, nor the stable shims approximating the same. (#227)
+- Removed hasher specialization support from `aHash`, which was resulting in inconsistent hashes being generated for a key. (#248)
+
+## Fixed
+- Fixed union length comparison. (#228)
+
+## ~~[v0.10.0] - 2021-01-16~~
+
+This release was _yanked_ due to inconsistent hashes being generated with the `nightly` feature. (#248)
+
+## Changed
+- Parametrized `RawTable`, `HashSet` and `HashMap` over an allocator. (#133)
+- Improved branch prediction hints on stable. (#209)
+- Optimized hashing of primitive types with AHash using specialization. (#207)
+- Only instantiate `RawTable`'s reserve functions once per key-value. (#204)
+
+## [v0.9.1] - 2020-09-28
+
+## Added
+- Added safe methods to `RawTable` (#202):
+  - `get`: `find` and `as_ref`
+  - `get_mut`: `find` and `as_mut`
+  - `insert_entry`: `insert` and `as_mut`
+  - `remove_entry`: `find` and `remove`
+  - `erase_entry`: `find` and `erase`
+
+## Changed
+- Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200)
+- Made `RawTable::drain` safe. (#201)
+
+## [v0.9.0] - 2020-09-03
+
+### Fixed
+- `drain_filter` now removes and yields items that do match the predicate,
+  rather than items that don't. This is a **breaking change** to match the
+  behavior of the `drain_filter` methods in `std`. (#187)
+
+### Added
+- Added `replace_entry_with` to `OccupiedEntry`, and `and_replace_entry_with` to `Entry`. (#190)
+- Implemented `FusedIterator` and `size_hint` for `DrainFilter`. (#188)
+
+### Changed
+- The minimum Rust version has been bumped to 1.36 (due to `crossbeam` dependency). (#193)
+- Updated `ahash` dependency to 0.4. (#198)
+- `HashMap::with_hasher` and `HashSet::with_hasher` are now `const fn`. (#195)
+- Removed `T: Hash + Eq` and `S: BuildHasher` bounds on `HashSet::new`,
+  `with_capacity`, `with_hasher`, and `with_capacity_and_hasher`. (#185)
+
+## [v0.8.2] - 2020-08-08
+
+### Changed
+- Avoid closures to improve compile times. (#183)
+- Do not iterate to drop if empty. (#182)
+
+## [v0.8.1] - 2020-07-16
+
+### Added
+- Added `erase` and `remove` to `RawTable`. (#171)
+- Added `try_with_capacity` to `RawTable`. (#174)
+- Added methods that allow re-using a `RawIter` for `RawDrain`,
+  `RawIntoIter`, and `RawParIter`. (#175)
+- Added `reflect_remove` and `reflect_insert` to `RawIter`. (#175)
+- Added a `drain_filter` function to `HashSet`. (#179)
+
+### Changed
+- Deprecated `RawTable::erase_no_drop` in favor of `erase` and `remove`. (#176)
+- `insert_no_grow` is now exposed under the `"raw"` feature. (#180)
+
+## [v0.8.0] - 2020-06-18
+
+### Fixed
+- Marked `RawTable::par_iter` as `unsafe`. (#157)
+
+### Changed
+- Reduced the size of `HashMap`. (#159)
+- No longer create tables with a capacity of 1 element. (#162)
+- Removed `K: Eq + Hash` bounds on `retain`. (#163)
+- Pulled in `HashMap` changes from rust-lang/rust (#164):
+  - `extend_one` support on nightly.
+  - `CollectionAllocErr` renamed to `TryReserveError`.
+  - Added `HashSet::get_or_insert_owned`.
+ - `Default` for `HashSet` no longer requires `T: Eq + Hash` and `S: BuildHasher`. + +## [v0.7.2] - 2020-04-27 + +### Added +- Added `or_insert_with_key` to `Entry`. (#152) + +### Fixed +- Partially reverted `Clone` optimization which was unsound. (#154) + +### Changed +- Disabled use of `const-random` by default, which prevented reproducible builds. (#155) +- Optimized `repeat` function. (#150) +- Use `NonNull` for buckets, which improves codegen for iterators. (#148) + +## [v0.7.1] - 2020-03-16 + +### Added +- Added `HashMap::get_key_value_mut`. (#145) + +### Changed +- Optimized `Clone` implementation. (#146) + +## [v0.7.0] - 2020-01-31 + +### Added +- Added a `drain_filter` function to `HashMap`. (#135) + +### Changed +- Updated `ahash` dependency to 0.3. (#141) +- Optimized set union and intersection. (#130) +- `raw_entry` can now be used without requiring `S: BuildHasher`. (#123) +- `RawTable::bucket_index` can now be used under the `raw` feature. (#128) + +## [v0.6.3] - 2019-10-31 + +### Added +- Added an `ahash-compile-time-rng` feature (enabled by default) which allows disabling the + `compile-time-rng` feature in `ahash` to work around a Cargo bug. (#125) + +## [v0.6.2] - 2019-10-23 + +### Added +- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between + runtime performance and compilation time. (#119) + +## [v0.6.1] - 2019-10-04 + +### Added +- Added `Entry::insert` and `RawEntryMut::insert`. (#118) + +### Changed +- `Group::static_empty` was changed from a `const` to a `static` (#116). + +## [v0.6.0] - 2019-08-13 + +### Fixed +- Fixed AHash accidentally depending on `std`. (#110) + +### Changed +- The minimum Rust version has been bumped to 1.32 (due to `rand` dependency). + +## ~~[v0.5.1] - 2019-08-04~~ + +This release was _yanked_ due to a breaking change for users of `no-default-features`. + +### Added +- The experimental and unsafe `RawTable` API is available under the "raw" feature. (#108) +- Added entry-like methods for `HashSet`. (#98) + +### Changed +- Changed the default hasher from FxHash to AHash. (#97) +- `hashbrown` is now fully `no_std` on recent Rust versions (1.36+). (#96) + +### Fixed +- We now avoid growing the table during insertions when it wasn't necessary. (#106) +- `RawOccupiedEntryMut` now properly implements `Send` and `Sync`. (#100) +- Relaxed `lazy_static` version. (#92) + +## [v0.5.0] - 2019-06-12 + +### Fixed +- Resize with a more conservative amount of space after deletions. (#86) + +### Changed +- Exposed the Layout of the failed allocation in CollectionAllocErr::AllocErr. (#89) + +## [v0.4.0] - 2019-05-30 + +### Fixed +- Fixed `Send` trait bounds on `IterMut` not matching the libstd one. (#82) + +## [v0.3.1] - 2019-05-30 + +### Fixed +- Fixed incorrect use of slice in unsafe code. (#80) + +## [v0.3.0] - 2019-04-23 + +### Changed +- Changed shrink_to to not panic if min_capacity < capacity. (#67) + +### Fixed +- Worked around emscripten bug emscripten-core/emscripten-fastcomp#258. (#66) + +## [v0.2.2] - 2019-04-16 + +### Fixed +- Inlined non-nightly lowest_set_bit_nonzero. (#64) +- Fixed build on latest nightly. (#65) + +## [v0.2.1] - 2019-04-14 + +### Changed +- Use for_each in map Extend and FromIterator. (#58) +- Improved worst-case performance of HashSet.is_subset. (#61) + +### Fixed +- Removed incorrect debug_assert. (#60) + +## [v0.2.0] - 2019-03-31 + +### Changed +- The code has been updated to Rust 2018 edition. This means that the minimum + Rust version has been bumped to 1.31 (2018 edition). 
+ +### Added +- Added `insert_with_hasher` to the raw_entry API to allow `K: !(Hash + Eq)`. (#54) +- Added support for using hashbrown as the hash table implementation in libstd. (#46) + +### Fixed +- Fixed cargo build with minimal-versions. (#45) +- Fixed `#[may_dangle]` attributes to match the libstd `HashMap`. (#46) +- ZST keys and values are now handled properly. (#46) + +## [v0.1.8] - 2019-01-14 + +### Added +- Rayon parallel iterator support (#37) +- `raw_entry` support (#31) +- `#[may_dangle]` on nightly (#31) +- `try_reserve` support (#31) + +### Fixed +- Fixed variance on `IterMut`. (#31) + +## [v0.1.7] - 2018-12-05 + +### Fixed +- Fixed non-SSE version of convert_special_to_empty_and_full_to_deleted. (#32) +- Fixed overflow in rehash_in_place. (#33) + +## [v0.1.6] - 2018-11-17 + +### Fixed +- Fixed compile error on nightly. (#29) + +## [v0.1.5] - 2018-11-08 + +### Fixed +- Fixed subtraction overflow in generic::Group::match_byte. (#28) + +## [v0.1.4] - 2018-11-04 + +### Fixed +- Fixed a bug in the `erase_no_drop` implementation. (#26) + +## [v0.1.3] - 2018-11-01 + +### Added +- Serde support. (#14) + +### Fixed +- Make the compiler inline functions more aggressively. (#20) + +## [v0.1.2] - 2018-10-31 + +### Fixed +- `clear` segfaults when called on an empty table. (#13) + +## [v0.1.1] - 2018-10-30 + +### Fixed +- `erase_no_drop` optimization not triggering in the SSE2 implementation. (#3) +- Missing `Send` and `Sync` for hash map and iterator types. (#7) +- Bug when inserting into a table smaller than the group width. (#5) + +## v0.1.0 - 2018-10-29 + +- Initial release + +[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...HEAD +[v0.12.3]: https://github.com/rust-lang/hashbrown/compare/v0.12.2...v0.12.3 +[v0.12.2]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...v0.12.2 +[v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1 +[v0.12.0]: https://github.com/rust-lang/hashbrown/compare/v0.11.2...v0.12.0 +[v0.11.2]: https://github.com/rust-lang/hashbrown/compare/v0.11.1...v0.11.2 +[v0.11.1]: https://github.com/rust-lang/hashbrown/compare/v0.11.0...v0.11.1 +[v0.11.0]: https://github.com/rust-lang/hashbrown/compare/v0.10.0...v0.11.0 +[v0.10.0]: https://github.com/rust-lang/hashbrown/compare/v0.9.1...v0.10.0 +[v0.9.1]: https://github.com/rust-lang/hashbrown/compare/v0.9.0...v0.9.1 +[v0.9.0]: https://github.com/rust-lang/hashbrown/compare/v0.8.2...v0.9.0 +[v0.8.2]: https://github.com/rust-lang/hashbrown/compare/v0.8.1...v0.8.2 +[v0.8.1]: https://github.com/rust-lang/hashbrown/compare/v0.8.0...v0.8.1 +[v0.8.0]: https://github.com/rust-lang/hashbrown/compare/v0.7.2...v0.8.0 +[v0.7.2]: https://github.com/rust-lang/hashbrown/compare/v0.7.1...v0.7.2 +[v0.7.1]: https://github.com/rust-lang/hashbrown/compare/v0.7.0...v0.7.1 +[v0.7.0]: https://github.com/rust-lang/hashbrown/compare/v0.6.3...v0.7.0 +[v0.6.3]: https://github.com/rust-lang/hashbrown/compare/v0.6.2...v0.6.3 +[v0.6.2]: https://github.com/rust-lang/hashbrown/compare/v0.6.1...v0.6.2 +[v0.6.1]: https://github.com/rust-lang/hashbrown/compare/v0.6.0...v0.6.1 +[v0.6.0]: https://github.com/rust-lang/hashbrown/compare/v0.5.1...v0.6.0 +[v0.5.1]: https://github.com/rust-lang/hashbrown/compare/v0.5.0...v0.5.1 +[v0.5.0]: https://github.com/rust-lang/hashbrown/compare/v0.4.0...v0.5.0 +[v0.4.0]: https://github.com/rust-lang/hashbrown/compare/v0.3.1...v0.4.0 +[v0.3.1]: https://github.com/rust-lang/hashbrown/compare/v0.3.0...v0.3.1 +[v0.3.0]: 
https://github.com/rust-lang/hashbrown/compare/v0.2.2...v0.3.0 +[v0.2.2]: https://github.com/rust-lang/hashbrown/compare/v0.2.1...v0.2.2 +[v0.2.1]: https://github.com/rust-lang/hashbrown/compare/v0.2.0...v0.2.1 +[v0.2.0]: https://github.com/rust-lang/hashbrown/compare/v0.1.8...v0.2.0 +[v0.1.8]: https://github.com/rust-lang/hashbrown/compare/v0.1.7...v0.1.8 +[v0.1.7]: https://github.com/rust-lang/hashbrown/compare/v0.1.6...v0.1.7 +[v0.1.6]: https://github.com/rust-lang/hashbrown/compare/v0.1.5...v0.1.6 +[v0.1.5]: https://github.com/rust-lang/hashbrown/compare/v0.1.4...v0.1.5 +[v0.1.4]: https://github.com/rust-lang/hashbrown/compare/v0.1.3...v0.1.4 +[v0.1.3]: https://github.com/rust-lang/hashbrown/compare/v0.1.2...v0.1.3 +[v0.1.2]: https://github.com/rust-lang/hashbrown/compare/v0.1.1...v0.1.2 +[v0.1.1]: https://github.com/rust-lang/hashbrown/compare/v0.1.0...v0.1.1 diff --git a/vendor/hashbrown-0.12.3/LICENSE-APACHE b/vendor/hashbrown-0.12.3/LICENSE-APACHE new file mode 100644 index 0000000..16fe87b --- /dev/null +++ b/vendor/hashbrown-0.12.3/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/hashbrown-0.12.3/LICENSE-MIT b/vendor/hashbrown-0.12.3/LICENSE-MIT new file mode 100644 index 0000000..5afc2a7 --- /dev/null +++ b/vendor/hashbrown-0.12.3/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2016 Amanieu d'Antras + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/hashbrown-0.12.3/README.md b/vendor/hashbrown-0.12.3/README.md new file mode 100644 index 0000000..2eddcf3 --- /dev/null +++ b/vendor/hashbrown-0.12.3/README.md @@ -0,0 +1,126 @@ +hashbrown +========= + +[![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions) +[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) +[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) +[![Rust](https://img.shields.io/badge/rust-1.56.1%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) + +This crate is a Rust port of Google's high-performance [SwissTable] hash +map, adapted to make it a drop-in replacement for Rust's standard `HashMap` +and `HashSet` types. + +The original C++ version of SwissTable can be found [here], and this +[CppCon talk] gives an overview of how the algorithm works. + +Since Rust 1.36, this is now the `HashMap` implementation for the Rust standard +library. However you may still want to use this crate instead since it works +in environments without `std`, such as embedded systems and kernels. + +[SwissTable]: https://abseil.io/blog/20180927-swisstables +[here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 + +## [Change log](CHANGELOG.md) + +## Features + +- Drop-in replacement for the standard library `HashMap` and `HashSet` types. +- Uses [AHash](https://github.com/tkaitchuck/aHash) as the default hasher, which is much faster than SipHash. + However, AHash does *not provide the same level of HashDoS resistance* as SipHash, so if that is important to you, you might want to consider using a different hasher. +- Around 2x faster than the previous standard library `HashMap`. +- Lower memory usage: only 1 byte of overhead per entry instead of 8. +- Compatible with `#[no_std]` (but requires a global allocator with the `alloc` crate). +- Empty hash maps do not allocate any memory. +- SIMD lookups to scan multiple hash entries in parallel. + +## Performance + +Compared to the previous implementation of `std::collections::HashMap` (Rust 1.35). 
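+
+As a rough sketch (using only public `hashbrown` and `std` APIs; the variable
+names are illustrative), the two configurations compared below are selected at
+construction time:
+
+```rust
+use hashbrown::HashMap;
+use std::collections::hash_map::RandomState;
+
+// Default hasher: AHash, the faster option in the first table.
+let mut fast: HashMap<u32, &str> = HashMap::new();
+fast.insert(1, "one");
+
+// Explicit SipHash via libstd's RandomState, trading speed for
+// HashDoS resistance (the second table).
+let mut resistant: HashMap<u32, &str, RandomState> =
+    HashMap::with_hasher(RandomState::new());
+resistant.insert(1, "one");
+```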
+ +With the hashbrown default AHash hasher: + +| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | +|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| +| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 | +| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 | +| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 | +| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 | +| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 | +| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 | +| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 | +| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 | +| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 | +| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 | +| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 | +| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 | +| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 | +| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 | +| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 | + + +With the libstd default SipHash hasher: + +|name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup | +|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------| +|insert_std_highbits |19,216 |16,885 | -2,331 | -12.13% | x 1.14 | +|insert_std_random |19,179 |17,034 | -2,145 | -11.18% | x 1.13 | +|insert_std_serial |19,462 |17,493 | -1,969 | -10.12% | x 1.11 | +|insert_erase_std_highbits |50,825 |35,847 | -14,978 | -29.47% | x 1.42 | +|insert_erase_std_random |51,448 |35,392 | -16,056 | -31.21% | x 1.45 | +|insert_erase_std_serial |87,711 |38,091 | -49,620 | -56.57% | x 2.30 | +|iter_std_highbits |1,378 |1,159 | -219 | -15.89% | x 1.19 | +|iter_std_random |1,395 |1,132 | -263 | -18.85% | x 1.23 | +|iter_std_serial |1,704 |1,105 | -599 | -35.15% | x 1.54 | +|lookup_std_highbits |17,195 |13,642 | -3,553 | -20.66% | x 1.26 | +|lookup_std_random |17,181 |13,773 | -3,408 | -19.84% | x 1.25 | +|lookup_std_serial |15,483 |13,651 | -1,832 | -11.83% | x 1.13 | +|lookup_fail_std_highbits |20,926 |13,474 | -7,452 | -35.61% | x 1.55 | +|lookup_fail_std_random |21,766 |13,505 | -8,261 | -37.95% | x 1.61 | +|lookup_fail_std_serial |19,336 |13,519 | -5,817 | -30.08% | x 1.43 | + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +hashbrown = "0.12" +``` + +Then: + +```rust +use hashbrown::HashMap; + +let mut map = HashMap::new(); +map.insert(1, "one"); +``` +## Flags +This crate has the following Cargo features: + +- `nightly`: Enables nightly-only features including: `#[may_dangle]`. +- `serde`: Enables serde serialization support. +- `rayon`: Enables rayon parallel iterator support. +- `raw`: Enables access to the experimental and unsafe `RawTable` API. +- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost + of compilation time. (enabled by default) +- `bumpalo`: Provides a `BumpWrapper` type which allows `bumpalo` to be used for memory allocation. +- `ahash`: Compiles with ahash as default hasher. (enabled by default) +- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash. 
For targets with no random number generator +this pre-generates seeds at compile time and embeds them as constants. See [aHash's documentation](https://github.com/tkaitchuck/aHash#flags) (disabled by default) + +## License + +Licensed under either of: + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. diff --git a/vendor/hashbrown-0.12.3/benches/bench.rs b/vendor/hashbrown-0.12.3/benches/bench.rs new file mode 100644 index 0000000..c393b9a --- /dev/null +++ b/vendor/hashbrown-0.12.3/benches/bench.rs @@ -0,0 +1,331 @@ +// This benchmark suite contains some benchmarks along a set of dimensions: +// Hasher: std default (SipHash) and crate default (AHash). +// Int key distribution: low bit heavy, top bit heavy, and random. +// Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter +#![feature(test)] + +extern crate test; + +use test::{black_box, Bencher}; + +use hashbrown::hash_map::DefaultHashBuilder; +use hashbrown::{HashMap, HashSet}; +use std::{ + collections::hash_map::RandomState, + sync::atomic::{self, AtomicUsize}, +}; + +const SIZE: usize = 1000; + +// The default hashmap when using this crate directly. +type AHashMap = HashMap; +// This uses the hashmap from this crate with the default hasher of the stdlib. +type StdHashMap = HashMap; + +// A random key iterator. +#[derive(Clone, Copy)] +struct RandomKeys { + state: usize, +} + +impl RandomKeys { + fn new() -> Self { + RandomKeys { state: 0 } + } +} + +impl Iterator for RandomKeys { + type Item = usize; + fn next(&mut self) -> Option { + // Add 1 then multiply by some 32 bit prime. + self.state = self.state.wrapping_add(1).wrapping_mul(3_787_392_781); + Some(self.state) + } +} + +// Just an arbitrary side effect to make the maps not shortcircuit to the non-dropping path +// when dropping maps/entries (most real world usages likely have drop in the key or value) +lazy_static::lazy_static! { + static ref SIDE_EFFECT: AtomicUsize = AtomicUsize::new(0); +} + +#[derive(Clone)] +struct DropType(usize); +impl Drop for DropType { + fn drop(&mut self) { + SIDE_EFFECT.fetch_add(self.0, atomic::Ordering::SeqCst); + } +} + +macro_rules! bench_suite { + ($bench_macro:ident, $bench_ahash_serial:ident, $bench_std_serial:ident, + $bench_ahash_highbits:ident, $bench_std_highbits:ident, + $bench_ahash_random:ident, $bench_std_random:ident) => { + $bench_macro!($bench_ahash_serial, AHashMap, 0..); + $bench_macro!($bench_std_serial, StdHashMap, 0..); + $bench_macro!( + $bench_ahash_highbits, + AHashMap, + (0..).map(usize::swap_bytes) + ); + $bench_macro!( + $bench_std_highbits, + StdHashMap, + (0..).map(usize::swap_bytes) + ); + $bench_macro!($bench_ahash_random, AHashMap, RandomKeys::new()); + $bench_macro!($bench_std_random, StdHashMap, RandomKeys::new()); + }; +} + +macro_rules! 
bench_insert { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::with_capacity_and_hasher(SIZE, Default::default()); + b.iter(|| { + m.clear(); + for i in ($keydist).take(SIZE) { + m.insert(i, (DropType(i), [i; 20])); + } + black_box(&mut m); + }); + eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); + } + }; +} + +bench_suite!( + bench_insert, + insert_ahash_serial, + insert_std_serial, + insert_ahash_highbits, + insert_std_highbits, + insert_ahash_random, + insert_std_random +); + +macro_rules! bench_grow_insert { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + b.iter(|| { + let mut m = $maptype::default(); + for i in ($keydist).take(SIZE) { + m.insert(i, DropType(i)); + } + black_box(&mut m); + }) + } + }; +} + +bench_suite!( + bench_grow_insert, + grow_insert_ahash_serial, + grow_insert_std_serial, + grow_insert_ahash_highbits, + grow_insert_std_highbits, + grow_insert_ahash_random, + grow_insert_std_random +); + +macro_rules! bench_insert_erase { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut base = $maptype::default(); + for i in ($keydist).take(SIZE) { + base.insert(i, DropType(i)); + } + let skip = $keydist.skip(SIZE); + b.iter(|| { + let mut m = base.clone(); + let mut add_iter = skip.clone(); + let mut remove_iter = $keydist; + // While keeping the size constant, + // replace the first keydist with the second. + for (add, remove) in (&mut add_iter).zip(&mut remove_iter).take(SIZE) { + m.insert(add, DropType(add)); + black_box(m.remove(&remove)); + } + black_box(m); + }); + eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); + } + }; +} + +bench_suite!( + bench_insert_erase, + insert_erase_ahash_serial, + insert_erase_std_serial, + insert_erase_ahash_highbits, + insert_erase_std_highbits, + insert_erase_ahash_random, + insert_erase_std_random +); + +macro_rules! bench_lookup { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + for i in $keydist.take(SIZE) { + m.insert(i, DropType(i)); + } + + b.iter(|| { + for i in $keydist.take(SIZE) { + black_box(m.get(&i)); + } + }); + eprintln!("{}", SIDE_EFFECT.load(atomic::Ordering::SeqCst)); + } + }; +} + +bench_suite!( + bench_lookup, + lookup_ahash_serial, + lookup_std_serial, + lookup_ahash_highbits, + lookup_std_highbits, + lookup_ahash_random, + lookup_std_random +); + +macro_rules! bench_lookup_fail { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + let mut iter = $keydist; + for i in (&mut iter).take(SIZE) { + m.insert(i, DropType(i)); + } + + b.iter(|| { + for i in (&mut iter).take(SIZE) { + black_box(m.get(&i)); + } + }) + } + }; +} + +bench_suite!( + bench_lookup_fail, + lookup_fail_ahash_serial, + lookup_fail_std_serial, + lookup_fail_ahash_highbits, + lookup_fail_std_highbits, + lookup_fail_ahash_random, + lookup_fail_std_random +); + +macro_rules! 
bench_iter { + ($name:ident, $maptype:ident, $keydist:expr) => { + #[bench] + fn $name(b: &mut Bencher) { + let mut m = $maptype::default(); + for i in ($keydist).take(SIZE) { + m.insert(i, DropType(i)); + } + + b.iter(|| { + for i in &m { + black_box(i); + } + }) + } + }; +} + +bench_suite!( + bench_iter, + iter_ahash_serial, + iter_std_serial, + iter_ahash_highbits, + iter_std_highbits, + iter_ahash_random, + iter_std_random +); + +#[bench] +fn clone_small(b: &mut Bencher) { + let mut m = HashMap::new(); + for i in 0..10 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + black_box(m.clone()); + }) +} + +#[bench] +fn clone_from_small(b: &mut Bencher) { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + for i in 0..10 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + m2.clone_from(&m); + black_box(&mut m2); + }) +} + +#[bench] +fn clone_large(b: &mut Bencher) { + let mut m = HashMap::new(); + for i in 0..1000 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + black_box(m.clone()); + }) +} + +#[bench] +fn clone_from_large(b: &mut Bencher) { + let mut m = HashMap::new(); + let mut m2 = HashMap::new(); + for i in 0..1000 { + m.insert(i, DropType(i)); + } + + b.iter(|| { + m2.clone_from(&m); + black_box(&mut m2); + }) +} + +#[bench] +fn rehash_in_place(b: &mut Bencher) { + b.iter(|| { + let mut set = HashSet::new(); + + // Each loop triggers one rehash + for _ in 0..10 { + for i in 0..224 { + set.insert(i); + } + + assert_eq!( + set.capacity(), + 224, + "The set must be at or close to capacity to trigger a re hashing" + ); + + for i in 100..1400 { + set.remove(&(i - 100)); + set.insert(i); + } + set.clear(); + } + }); +} diff --git a/vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs b/vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs new file mode 100644 index 0000000..857ad18 --- /dev/null +++ b/vendor/hashbrown-0.12.3/benches/insert_unique_unchecked.rs @@ -0,0 +1,32 @@ +//! Compare `insert` and `insert_unique_unchecked` operations performance. 
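+//!
+//! `insert_unique_unchecked` skips the probe for an existing equal key, so the
+//! caller must guarantee the key is not already in the map; if that assumption
+//! is violated the resulting map's behavior is unspecified (it may, for
+//! example, end up holding duplicate keys).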
+ +#![feature(test)] + +extern crate test; + +use hashbrown::HashMap; +use test::Bencher; + +#[bench] +fn insert(b: &mut Bencher) { + let keys: Vec = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect(); + b.iter(|| { + let mut m = HashMap::with_capacity(1000); + for k in &keys { + m.insert(k, k); + } + m + }); +} + +#[bench] +fn insert_unique_unchecked(b: &mut Bencher) { + let keys: Vec = (0..1000).map(|i| format!("xxxx{}yyyy", i)).collect(); + b.iter(|| { + let mut m = HashMap::with_capacity(1000); + for k in &keys { + m.insert_unique_unchecked(k, k); + } + m + }); +} diff --git a/vendor/hashbrown-0.12.3/clippy.toml b/vendor/hashbrown-0.12.3/clippy.toml new file mode 100644 index 0000000..d98bf2c --- /dev/null +++ b/vendor/hashbrown-0.12.3/clippy.toml @@ -0,0 +1 @@ +doc-valid-idents = [ "CppCon", "SwissTable", "SipHash", "HashDoS" ] diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs new file mode 100644 index 0000000..ef49783 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/mod.rs @@ -0,0 +1,4 @@ +#[cfg(feature = "rayon")] +pub(crate) mod rayon; +#[cfg(feature = "serde")] +mod serde; -- Gitee From 3a9972fe46c32fb2bc1e6e698a384f6021944af1 Mon Sep 17 00:00:00 2001 From: yangpan Date: Fri, 5 Jan 2024 10:38:34 +0800 Subject: [PATCH 6/6] =?UTF-8?q?=E4=BF=AE=E6=94=B9vendor--5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- vendor/hashbrown-0.12.3/Cargo.toml | 113 + .../src/external_trait_impls/rayon/helpers.rs | 27 + .../src/external_trait_impls/rayon/map.rs | 734 ++ .../src/external_trait_impls/rayon/mod.rs | 4 + .../src/external_trait_impls/rayon/raw.rs | 231 + .../src/external_trait_impls/rayon/set.rs | 659 ++ .../src/external_trait_impls/serde.rs | 201 + vendor/hashbrown-0.12.3/src/lib.rs | 150 + vendor/hashbrown-0.12.3/src/macros.rs | 70 + vendor/hashbrown-0.12.3/src/map.rs | 8408 +++++++++++++++++ vendor/hashbrown-0.12.3/src/raw/alloc.rs | 73 + vendor/hashbrown-0.12.3/src/raw/bitmask.rs | 122 + vendor/hashbrown-0.12.3/src/raw/generic.rs | 154 + vendor/hashbrown-0.12.3/src/raw/mod.rs | 2460 +++++ vendor/hashbrown-0.12.3/src/raw/sse2.rs | 146 + vendor/hashbrown-0.12.3/src/rustc_entry.rs | 630 ++ vendor/hashbrown-0.12.3/src/scopeguard.rs | 74 + vendor/hashbrown-0.12.3/src/set.rs | 2790 ++++++ vendor/hashbrown-0.12.3/tests/hasher.rs | 65 + vendor/hashbrown-0.12.3/tests/rayon.rs | 533 ++ vendor/hashbrown-0.12.3/tests/serde.rs | 65 + vendor/hashbrown-0.12.3/tests/set.rs | 34 + 22 files changed, 17743 insertions(+) create mode 100644 vendor/hashbrown-0.12.3/Cargo.toml create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs create mode 100644 vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs create mode 100644 vendor/hashbrown-0.12.3/src/lib.rs create mode 100644 vendor/hashbrown-0.12.3/src/macros.rs create mode 100644 vendor/hashbrown-0.12.3/src/map.rs create mode 100644 vendor/hashbrown-0.12.3/src/raw/alloc.rs create mode 100644 vendor/hashbrown-0.12.3/src/raw/bitmask.rs create mode 100644 vendor/hashbrown-0.12.3/src/raw/generic.rs create mode 100644 vendor/hashbrown-0.12.3/src/raw/mod.rs create 
mode 100644 vendor/hashbrown-0.12.3/src/raw/sse2.rs create mode 100644 vendor/hashbrown-0.12.3/src/rustc_entry.rs create mode 100644 vendor/hashbrown-0.12.3/src/scopeguard.rs create mode 100644 vendor/hashbrown-0.12.3/src/set.rs create mode 100644 vendor/hashbrown-0.12.3/tests/hasher.rs create mode 100644 vendor/hashbrown-0.12.3/tests/rayon.rs create mode 100644 vendor/hashbrown-0.12.3/tests/serde.rs create mode 100644 vendor/hashbrown-0.12.3/tests/set.rs diff --git a/vendor/hashbrown-0.12.3/Cargo.toml b/vendor/hashbrown-0.12.3/Cargo.toml new file mode 100644 index 0000000..fb130d2 --- /dev/null +++ b/vendor/hashbrown-0.12.3/Cargo.toml @@ -0,0 +1,113 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.56.0" +name = "hashbrown" +version = "0.12.3" +authors = ["Amanieu d'Antras "] +exclude = [ + ".github", + "/ci/*", +] +description = "A Rust port of Google's SwissTable hash map" +readme = "README.md" +keywords = [ + "hash", + "no_std", + "hashmap", + "swisstable", +] +categories = [ + "data-structures", + "no-std", +] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/hashbrown" +resolver = "2" + +[package.metadata.docs.rs] +features = [ + "nightly", + "rayon", + "serde", + "raw", +] + +[dependencies.ahash] +version = "0.7.0" +optional = true +default-features = false + +[dependencies.alloc] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-alloc" + +[dependencies.bumpalo] +version = "3.5.0" +optional = true + +[dependencies.compiler_builtins] +version = "0.1.2" +optional = true + +[dependencies.core] +version = "1.0.0" +optional = true +package = "rustc-std-workspace-core" + +[dependencies.rayon] +version = "1.0" +optional = true + +[dependencies.serde] +version = "1.0.25" +optional = true +default-features = false + +[dev-dependencies.doc-comment] +version = "0.3.1" + +[dev-dependencies.fnv] +version = "1.0.7" + +[dev-dependencies.lazy_static] +version = "1.4" + +[dev-dependencies.rand] +version = "0.8.3" +features = ["small_rng"] + +[dev-dependencies.rayon] +version = "1.0" + +[dev-dependencies.serde_test] +version = "1.0" + +[features] +ahash-compile-time-rng = ["ahash/compile-time-rng"] +default = [ + "ahash", + "inline-more", +] +inline-more = [] +nightly = [] +raw = [] +rustc-dep-of-std = [ + "nightly", + "core", + "compiler_builtins", + "alloc", + "rustc-internal-api", +] +rustc-internal-api = [] diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs new file mode 100644 index 0000000..070b08c --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/helpers.rs @@ -0,0 +1,27 @@ +use alloc::collections::LinkedList; +use alloc::vec::Vec; + +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +/// Helper for collecting parallel iterators to an intermediary +#[allow(clippy::linkedlist)] // yes, we need linked list here for efficient appending! 
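+// (Sketch of the scheme used below: each rayon job folds its items into a
+// local `Vec`, and the per-job vectors are then stitched together with O(1)
+// `LinkedList::append` calls in the reduce step, so elements are moved only
+// once.)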
+pub(super) fn collect(iter: I) -> (LinkedList>, usize) { + let list = iter + .into_par_iter() + .fold(Vec::new, |mut vec, elem| { + vec.push(elem); + vec + }) + .map(|vec| { + let mut list = LinkedList::new(); + list.push_back(vec); + list + }) + .reduce(LinkedList::new, |mut list1, mut list2| { + list1.append(&mut list2); + list1 + }); + + let len = list.iter().map(Vec::len).sum(); + (list, len) +} diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs new file mode 100644 index 0000000..14d91c2 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/map.rs @@ -0,0 +1,734 @@ +//! Rayon extensions for `HashMap`. + +use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; +use crate::hash_map::HashMap; +use crate::raw::{Allocator, Global}; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::marker::PhantomData; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; + +/// Parallel iterator over shared references to entries in a map. +/// +/// This iterator is created by the [`par_iter`] method on [`HashMap`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> { + type Item = (&'a K, &'a V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { + let r = x.as_ref(); + (&r.0, &r.1) + }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParIter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParIter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { + let r = x.as_ref(); + (&r.0, &r.1) + }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over shared references to keys in a map. +/// +/// This iterator is created by the [`par_keys`] method on [`HashMap`]. +/// See its documentation for more. 
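+///
+/// A minimal usage sketch (assuming the `rayon` feature is enabled):
+///
+/// ```
+/// use hashbrown::HashMap;
+/// use rayon::prelude::*;
+///
+/// let map: HashMap<u32, &str> = [(1, "one"), (2, "two")].into_iter().collect();
+/// let key_sum: u32 = map.par_keys().sum();
+/// assert_eq!(key_sum, 3);
+/// ```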
+/// +/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParKeys<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> { + type Item = &'a K; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { &x.as_ref().0 }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParKeys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParKeys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().0 }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over shared references to values in a map. +/// +/// This iterator is created by the [`par_values`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParValues<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> { + type Item = &'a V; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { &x.as_ref().1 }) + .drive_unindexed(consumer) + } +} + +impl Clone for ParValues<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for ParValues<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { &x.as_ref().1 }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over mutable references to entries in a map. +/// +/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`] +/// (provided by the [`IntoParallelRefMutIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html +pub struct ParIterMut<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a mut V)>, +} + +impl<'a, K: Sync, V: Send> ParallelIterator for ParIterMut<'a, K, V> { + type Item = (&'a K, &'a mut V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { + let r = x.as_mut(); + (&r.0, &mut r.1) + }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParIterMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: self.inner.clone(), + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel iterator over mutable references to values in a map. +/// +/// This iterator is created by the [`par_values_mut`] method on [`HashMap`]. +/// See its documentation for more. 
+/// +/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParValuesMut<'a, K, V> { + inner: RawParIter<(K, V)>, + marker: PhantomData<(&'a K, &'a mut V)>, +} + +impl<'a, K: Sync, V: Send> ParallelIterator for ParValuesMut<'a, K, V> { + type Item = &'a mut V; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner + .map(|x| unsafe { &mut x.as_mut().1 }) + .drive_unindexed(consumer) + } +} + +impl fmt::Debug for ParValuesMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParValues { + inner: self.inner.clone(), + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel iterator over entries of a consumed map. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashMap`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. +/// +/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter +/// [`HashMap`]: /hashbrown/struct.HashMap.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + inner: RawIntoParIter<(K, V), A>, +} + +impl ParallelIterator for IntoParIter { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug + for IntoParIter +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel draining iterator over entries of a map. +/// +/// This iterator is created by the [`par_drain`] method on [`HashMap`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain +/// [`HashMap`]: /hashbrown/struct.HashMap.html +pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> { + inner: RawParDrain<'a, (K, V), A>, +} + +impl ParallelIterator for ParDrain<'_, K, V, A> { + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +impl fmt::Debug + for ParDrain<'_, K, V, A> +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +impl HashMap { + /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_keys(&self) -> ParKeys<'_, K, V> { + ParKeys { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } + + /// Visits (potentially in parallel) immutably borrowed values in an arbitrary order. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_values(&self) -> ParValues<'_, K, V> { + ParValues { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } +} + +impl HashMap { + /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. 
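+ ///
+ /// A minimal usage sketch (assuming the `rayon` feature is enabled):
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use rayon::prelude::*;
+ ///
+ /// let mut map: HashMap<u32, u32> = (0..4).map(|i| (i, i)).collect();
+ /// map.par_values_mut().for_each(|v| *v *= 2);
+ /// assert_eq!(map[&3], 6);
+ /// ```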
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { + ParValuesMut { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } + + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the map's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, K, V, A> { + ParDrain { + inner: self.table.par_drain(), + } + } +} + +impl HashMap +where + K: Eq + Hash + Sync, + V: PartialEq + Sync, + S: BuildHasher + Sync, + A: Allocator + Clone + Sync, +{ + /// Returns `true` if the map is equal to another, + /// i.e. both maps contain the same keys mapped to the same values. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_eq(&self, other: &Self) -> bool { + self.len() == other.len() + && self + .into_par_iter() + .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) + } +} + +impl IntoParallelIterator + for HashMap +{ + type Item = (K, V); + type Iter = IntoParIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + inner: self.table.into_par_iter(), + } + } +} + +impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator + for &'a HashMap +{ + type Item = (&'a K, &'a V); + type Iter = ParIter<'a, K, V>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } +} + +impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator + for &'a mut HashMap +{ + type Item = (&'a K, &'a mut V); + type Iter = ParIterMut<'a, K, V>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + inner: unsafe { self.table.par_iter() }, + marker: PhantomData, + } + } +} + +/// Collect (key, value) pairs from a parallel iterator into a +/// hashmap. If multiple pairs correspond to the same key, then the +/// ones produced earlier in the parallel iterator will be +/// overwritten, just as with a sequential iterator. +impl FromParallelIterator<(K, V)> for HashMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher + Default, +{ + fn from_par_iter
<P>
(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + let mut map = HashMap::default(); + map.par_extend(par_iter); + map + } +} + +/// Extend a hash map with items from a parallel iterator. +impl ParallelExtend<(K, V)> for HashMap +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher, + A: Allocator + Clone, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +/// Extend a hash map with copied items from a parallel iterator. +impl<'a, K, V, S, A> ParallelExtend<(&'a K, &'a V)> for HashMap +where + K: Copy + Eq + Hash + Sync, + V: Copy + Sync, + S: BuildHasher, + A: Allocator + Clone, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +// This is equal to the normal `HashMap` -- no custom advantage. +fn extend(map: &mut HashMap, par_iter: I) +where + K: Eq + Hash, + S: BuildHasher, + I: IntoParallelIterator, + A: Allocator + Clone, + HashMap: Extend, +{ + let (list, len) = super::helpers::collect(par_iter); + + // Keys may be already present or show multiple times in the iterator. + // Reserve the entire length if the map is empty. + // Otherwise reserve half the length (rounded up), so the map + // will only resize twice in the worst case. + let reserve = if map.is_empty() { len } else { (len + 1) / 2 }; + map.reserve(reserve); + for vec in list { + map.extend(vec); + } +} + +#[cfg(test)] +mod test_par_map { + use alloc::vec::Vec; + use core::hash::{Hash, Hasher}; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::hash_map::HashMap; + + struct Dropable<'a> { + k: usize, + counter: &'a AtomicUsize, + } + + impl Dropable<'_> { + fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> { + counter.fetch_add(1, Ordering::Relaxed); + + Dropable { k, counter } + } + } + + impl Drop for Dropable<'_> { + fn drop(&mut self) { + self.counter.fetch_sub(1, Ordering::Relaxed); + } + } + + impl Clone for Dropable<'_> { + fn clone(&self) -> Self { + Dropable::new(self.k, self.counter) + } + } + + impl Hash for Dropable<'_> { + fn hash(&self, state: &mut H) + where + H: Hasher, + { + self.k.hash(state); + } + } + + impl PartialEq for Dropable<'_> { + fn eq(&self, other: &Self) -> bool { + self.k == other.k + } + } + + impl Eq for Dropable<'_> {} + + #[test] + fn test_into_iter_drops() { + let key = AtomicUsize::new(0); + let value = AtomicUsize::new(0); + + let hm = { + let mut hm = HashMap::new(); + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + + for i in 0..100 { + let d1 = Dropable::new(i, &key); + let d2 = Dropable::new(i + 100, &value); + hm.insert(d1, d2); + } + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. + drop(hm.clone()); + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // Ensure that dropping the iterator does not leak anything. 
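+ // (Early drop is leak-free: the consuming iterator still owns the
+ // underlying table, whose `Drop` impl drops any unconsumed entries.)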
+ drop(hm.clone().into_par_iter()); + + { + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // retain only half + let _v: Vec<_> = hm + .into_par_iter() + .filter(|&(ref key, _)| key.k < 50) + .collect(); + + assert_eq!(key.load(Ordering::Relaxed), 50); + assert_eq!(value.load(Ordering::Relaxed), 50); + }; + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_drain_drops() { + let key = AtomicUsize::new(0); + let value = AtomicUsize::new(0); + + let mut hm = { + let mut hm = HashMap::new(); + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + + for i in 0..100 { + let d1 = Dropable::new(i, &key); + let d2 = Dropable::new(i + 100, &value); + hm.insert(d1, d2); + } + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. + drop(hm.clone()); + + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // Ensure that dropping the drain iterator does not leak anything. + drop(hm.clone().par_drain()); + + { + assert_eq!(key.load(Ordering::Relaxed), 100); + assert_eq!(value.load(Ordering::Relaxed), 100); + + // retain only half + let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect(); + assert!(hm.is_empty()); + + assert_eq!(key.load(Ordering::Relaxed), 50); + assert_eq!(value.load(Ordering::Relaxed), 50); + }; + + assert_eq!(key.load(Ordering::Relaxed), 0); + assert_eq!(value.load(Ordering::Relaxed), 0); + } + + #[test] + fn test_empty_iter() { + let mut m: HashMap = HashMap::new(); + assert_eq!(m.par_drain().count(), 0); + assert_eq!(m.par_keys().count(), 0); + assert_eq!(m.par_values().count(), 0); + assert_eq!(m.par_values_mut().count(), 0); + assert_eq!(m.par_iter().count(), 0); + assert_eq!(m.par_iter_mut().count(), 0); + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + assert_eq!(m.into_par_iter().count(), 0); + } + + #[test] + fn test_iterate() { + let mut m = HashMap::with_capacity(4); + for i in 0..32 { + assert!(m.insert(i, i * 2).is_none()); + } + assert_eq!(m.len(), 32); + + let observed = AtomicUsize::new(0); + + m.par_iter().for_each(|(k, v)| { + assert_eq!(*v, *k * 2); + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_par_iter().collect(); + let keys: Vec<_> = map.par_keys().cloned().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_par_iter().collect(); + let values: Vec<_> = map.par_values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: HashMap<_, _> = vec.into_par_iter().collect(); + map.par_values_mut().for_each(|value| *value *= 2); + let values: Vec<_> = map.par_values().cloned().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn test_eq() { + let mut m1 
= HashMap::new(); + m1.insert(1, 2); + m1.insert(2, 3); + m1.insert(3, 4); + + let mut m2 = HashMap::new(); + m2.insert(1, 2); + m2.insert(2, 3); + + assert!(!m1.par_eq(&m2)); + + m2.insert(3, 4); + + assert!(m1.par_eq(&m2)); + } + + #[test] + fn test_from_iter() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.par_iter().cloned().collect(); + + for &(k, v) in &xs { + assert_eq!(map.get(&k), Some(&v)); + } + } + + #[test] + fn test_extend_ref() { + let mut a = HashMap::new(); + a.insert(1, "one"); + let mut b = HashMap::new(); + b.insert(2, "two"); + b.insert(3, "three"); + + a.par_extend(&b); + + assert_eq!(a.len(), 3); + assert_eq!(a[&1], "one"); + assert_eq!(a[&2], "two"); + assert_eq!(a[&3], "three"); + } +} diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs new file mode 100644 index 0000000..99337a1 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/mod.rs @@ -0,0 +1,4 @@ +mod helpers; +pub(crate) mod map; +pub(crate) mod raw; +pub(crate) mod set; diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs new file mode 100644 index 0000000..883303e --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/raw.rs @@ -0,0 +1,231 @@ +use crate::raw::Bucket; +use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable}; +use crate::scopeguard::guard; +use alloc::alloc::dealloc; +use core::marker::PhantomData; +use core::mem; +use core::ptr::NonNull; +use rayon::iter::{ + plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer}, + ParallelIterator, +}; + +/// Parallel iterator which returns a raw pointer to every full bucket in the table. +pub struct RawParIter { + iter: RawIterRange, +} + +impl RawParIter { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) unsafe fn iter(&self) -> RawIterRange { + self.iter.clone() + } +} + +impl Clone for RawParIter { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + iter: self.iter.clone(), + } + } +} + +impl From> for RawParIter { + fn from(it: RawIter) -> Self { + RawParIter { iter: it.iter } + } +} + +impl ParallelIterator for RawParIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let producer = ParIterProducer { iter: self.iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +/// Producer which returns a `Bucket` for every element. +struct ParIterProducer { + iter: RawIterRange, +} + +impl UnindexedProducer for ParIterProducer { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn split(self) -> (Self, Option) { + let (left, right) = self.iter.split(); + let left = ParIterProducer { iter: left }; + let right = right.map(|right| ParIterProducer { iter: right }); + (left, right) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold_with(self, folder: F) -> F + where + F: Folder, + { + folder.consume_iter(self.iter) + } +} + +/// Parallel iterator which consumes a table and returns elements. 
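+///
+/// Elements are yielded by value; once iteration finishes, a guard deallocates
+/// the table's storage, even if a consumer panics.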
+pub struct RawIntoParIter { + table: RawTable, +} + +impl RawIntoParIter { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) unsafe fn par_iter(&self) -> RawParIter { + self.table.par_iter() + } +} + +impl ParallelIterator for RawIntoParIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let iter = unsafe { self.table.iter().iter }; + let _guard = guard(self.table.into_allocation(), |alloc| { + if let Some((ptr, layout)) = *alloc { + unsafe { + dealloc(ptr.as_ptr(), layout); + } + } + }); + let producer = ParDrainProducer { iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +/// Parallel iterator which consumes elements without freeing the table storage. +pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> { + // We don't use a &'a mut RawTable because we want RawParDrain to be + // covariant over T. + table: NonNull>, + marker: PhantomData<&'a RawTable>, +} + +unsafe impl Send for RawParDrain<'_, T, A> {} + +impl RawParDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) unsafe fn par_iter(&self) -> RawParIter { + self.table.as_ref().par_iter() + } +} + +impl ParallelIterator for RawParDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + let _guard = guard(self.table, |table| unsafe { + table.as_mut().clear_no_drop(); + }); + let iter = unsafe { self.table.as_ref().iter().iter }; + mem::forget(self); + let producer = ParDrainProducer { iter }; + plumbing::bridge_unindexed(producer, consumer) + } +} + +impl Drop for RawParDrain<'_, T, A> { + fn drop(&mut self) { + // If drive_unindexed is not called then simply clear the table. + unsafe { + self.table.as_mut().clear(); + } + } +} + +/// Producer which will consume all elements in the range, even if it is dropped +/// halfway through. +struct ParDrainProducer { + iter: RawIterRange, +} + +impl UnindexedProducer for ParDrainProducer { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn split(self) -> (Self, Option) { + let (left, right) = self.iter.clone().split(); + mem::forget(self); + let left = ParDrainProducer { iter: left }; + let right = right.map(|right| ParDrainProducer { iter: right }); + (left, right) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn fold_with(mut self, mut folder: F) -> F + where + F: Folder, + { + // Make sure to modify the iterator in-place so that any remaining + // elements are processed in our Drop impl. + for item in &mut self.iter { + folder = folder.consume(unsafe { item.read() }); + if folder.full() { + return folder; + } + } + + // If we processed all elements then we don't need to run the drop. + mem::forget(self); + folder + } +} + +impl Drop for ParDrainProducer { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + // Drop all remaining elements + if mem::needs_drop::() { + for item in &mut self.iter { + unsafe { + item.drop(); + } + } + } + } +} + +impl RawTable { + /// Returns a parallel iterator over the elements in a `RawTable`. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn par_iter(&self) -> RawParIter { + RawParIter { + iter: self.iter().iter, + } + } + + /// Returns a parallel iterator over the elements in a `RawTable`. 
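+ ///
+ /// Unlike [`RawParIter`], this variant takes ownership of the table and
+ /// yields its elements by value.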
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn into_par_iter(self) -> RawIntoParIter { + RawIntoParIter { table: self } + } + + /// Returns a parallel iterator which consumes all elements of a `RawTable` + /// without freeing its memory allocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> RawParDrain<'_, T, A> { + RawParDrain { + table: NonNull::from(self), + marker: PhantomData, + } + } +} diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs new file mode 100644 index 0000000..ee4f6e6 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/rayon/set.rs @@ -0,0 +1,659 @@ +//! Rayon extensions for `HashSet`. + +use super::map; +use crate::hash_set::HashSet; +use crate::raw::{Allocator, Global}; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; + +/// Parallel iterator over elements of a consumed set. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashSet`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. +/// +/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter +/// [`HashSet`]: /hashbrown/struct.HashSet.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter { + inner: map::IntoParIter, +} + +impl ParallelIterator for IntoParIter { + type Item = T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.map(|(k, _)| k).drive_unindexed(consumer) + } +} + +/// Parallel draining iterator over entries of a set. +/// +/// This iterator is created by the [`par_drain`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParDrain<'a, T, A: Allocator + Clone = Global> { + inner: map::ParDrain<'a, T, (), A>, +} + +impl ParallelIterator for ParDrain<'_, T, A> { + type Item = T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.map(|(k, _)| k).drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in a set. +/// +/// This iterator is created by the [`par_iter`] method on [`HashSet`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter +/// [`HashSet`]: /hashbrown/struct.HashSet.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, T> { + inner: map::ParKeys<'a, T, ()>, +} + +impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the difference of +/// sets. +/// +/// This iterator is created by the [`par_difference`] method on [`HashSet`]. +/// See its documentation for more. 
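+///
+/// A minimal usage sketch (assuming the `rayon` feature is enabled):
+///
+/// ```
+/// use hashbrown::HashSet;
+/// use rayon::prelude::*;
+///
+/// let a: HashSet<u32> = [1, 2, 3].into_iter().collect();
+/// let b: HashSet<u32> = [2, 3, 4].into_iter().collect();
+/// let diff: HashSet<u32> = a.par_difference(&b).cloned().collect();
+/// let expected: HashSet<u32> = [1].into_iter().collect();
+/// assert_eq!(diff, expected);
+/// ```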
+/// +/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Clone + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .filter(|&x| !self.b.contains(x)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the symmetric +/// difference of sets. +/// +/// This iterator is created by the [`par_symmetric_difference`] method on +/// [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Clone + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .par_difference(self.b) + .chain(self.b.par_difference(self.a)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the intersection of +/// sets. +/// +/// This iterator is created by the [`par_intersection`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Clone + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.a + .into_par_iter() + .filter(|&x| self.b.contains(x)) + .drive_unindexed(consumer) + } +} + +/// Parallel iterator over shared references to elements in the union of sets. +/// +/// This iterator is created by the [`par_union`] method on [`HashSet`]. +/// See its documentation for more. +/// +/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union +/// [`HashSet`]: /hashbrown/struct.HashSet.html +pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> { + a: &'a HashSet, + b: &'a HashSet, +} + +impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A> +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Clone + Sync, +{ + type Item = &'a T; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + // We'll iterate one set in full, and only the remaining difference from the other. + // Use the smaller set for the difference in order to reduce hash lookups. 
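+ // (Each element of the larger set is yielded exactly once, and elements
+ // of the smaller set are yielded only when absent from the larger one, so
+ // the union contains no duplicates.)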
+ let (smaller, larger) = if self.a.len() <= self.b.len() { + (self.a, self.b) + } else { + (self.b, self.a) + }; + larger + .into_par_iter() + .chain(smaller.par_difference(larger)) + .drive_unindexed(consumer) + } +} + +impl HashSet +where + T: Eq + Hash + Sync, + S: BuildHasher + Sync, + A: Allocator + Clone + Sync, +{ + /// Visits (potentially in parallel) the values representing the union, + /// i.e. all the values in `self` or `other`, without duplicates. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S, A> { + ParUnion { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the difference, + /// i.e. the values that are in `self` but not in `other`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S, A> { + ParDifference { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the symmetric + /// difference, i.e. the values that are in `self` or in `other` but not in both. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_symmetric_difference<'a>( + &'a self, + other: &'a Self, + ) -> ParSymmetricDifference<'a, T, S, A> { + ParSymmetricDifference { a: self, b: other } + } + + /// Visits (potentially in parallel) the values representing the + /// intersection, i.e. the values that are both in `self` and `other`. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S, A> { + ParIntersection { a: self, b: other } + } + + /// Returns `true` if `self` has no elements in common with `other`. + /// This is equivalent to checking for an empty intersection. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_disjoint(&self, other: &Self) -> bool { + self.into_par_iter().all(|x| !other.contains(x)) + } + + /// Returns `true` if the set is a subset of another, + /// i.e. `other` contains at least all the values in `self`. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_subset(&self, other: &Self) -> bool { + if self.len() <= other.len() { + self.into_par_iter().all(|x| other.contains(x)) + } else { + false + } + } + + /// Returns `true` if the set is a superset of another, + /// i.e. `self` contains at least all the values in `other`. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_is_superset(&self, other: &Self) -> bool { + other.par_is_subset(self) + } + + /// Returns `true` if the set is equal to another, + /// i.e. both sets contain the same values. + /// + /// This method runs in a potentially parallel fashion. + pub fn par_eq(&self, other: &Self) -> bool { + self.len() == other.len() && self.par_is_subset(other) + } +} + +impl HashSet +where + T: Eq + Hash + Send, + A: Allocator + Clone + Send, +{ + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the set's allocated memory for reuse. 
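+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch (assumes this crate's `rayon` feature is
+    /// enabled):
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use rayon::prelude::*;
+    ///
+    /// let mut set: HashSet<i32> = (0..8).collect();
+    /// let drained: Vec<i32> = set.par_drain().collect();
+    /// assert!(set.is_empty());
+    /// assert_eq!(drained.len(), 8);
+    /// ```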
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn par_drain(&mut self) -> ParDrain<'_, T, A> {
+        ParDrain {
+            inner: self.map.par_drain(),
+        }
+    }
+}
+
+impl<T: Send, S, A: Allocator + Clone + Send> IntoParallelIterator for HashSet<T, S, A> {
+    type Item = T;
+    type Iter = IntoParIter<T, A>;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_par_iter(self) -> Self::Iter {
+        IntoParIter {
+            inner: self.map.into_par_iter(),
+        }
+    }
+}
+
+impl<'a, T: Sync, S, A: Allocator + Clone> IntoParallelIterator for &'a HashSet<T, S, A> {
+    type Item = &'a T;
+    type Iter = ParIter<'a, T>;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_par_iter(self) -> Self::Iter {
+        ParIter {
+            inner: self.map.par_keys(),
+        }
+    }
+}
+
+/// Collect values from a parallel iterator into a hashset.
+impl<T, S> FromParallelIterator<T> for HashSet<T, S, Global>
+where
+    T: Eq + Hash + Send,
+    S: BuildHasher + Default,
+{
+    fn from_par_iter<P>
(par_iter: P) -> Self + where + P: IntoParallelIterator, + { + let mut set = HashSet::default(); + set.par_extend(par_iter); + set + } +} + +/// Extend a hash set with items from a parallel iterator. +impl ParallelExtend for HashSet +where + T: Eq + Hash + Send, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +/// Extend a hash set with copied items from a parallel iterator. +impl<'a, T, S> ParallelExtend<&'a T> for HashSet +where + T: 'a + Copy + Eq + Hash + Sync, + S: BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + extend(self, par_iter); + } +} + +// This is equal to the normal `HashSet` -- no custom advantage. +fn extend(set: &mut HashSet, par_iter: I) +where + T: Eq + Hash, + S: BuildHasher, + A: Allocator + Clone, + I: IntoParallelIterator, + HashSet: Extend, +{ + let (list, len) = super::helpers::collect(par_iter); + + // Values may be already present or show multiple times in the iterator. + // Reserve the entire length if the set is empty. + // Otherwise reserve half the length (rounded up), so the set + // will only resize twice in the worst case. + let reserve = if set.is_empty() { len } else { (len + 1) / 2 }; + set.reserve(reserve); + for vec in list { + set.extend(vec); + } +} + +#[cfg(test)] +mod test_par_set { + use alloc::vec::Vec; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::hash_set::HashSet; + + #[test] + fn test_disjoint() { + let mut xs = HashSet::new(); + let mut ys = HashSet::new(); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(xs.insert(5)); + assert!(ys.insert(11)); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(xs.insert(7)); + assert!(xs.insert(19)); + assert!(xs.insert(4)); + assert!(ys.insert(2)); + assert!(ys.insert(-11)); + assert!(xs.par_is_disjoint(&ys)); + assert!(ys.par_is_disjoint(&xs)); + assert!(ys.insert(7)); + assert!(!xs.par_is_disjoint(&ys)); + assert!(!ys.par_is_disjoint(&xs)); + } + + #[test] + fn test_subset_and_superset() { + let mut a = HashSet::new(); + assert!(a.insert(0)); + assert!(a.insert(5)); + assert!(a.insert(11)); + assert!(a.insert(7)); + + let mut b = HashSet::new(); + assert!(b.insert(0)); + assert!(b.insert(7)); + assert!(b.insert(19)); + assert!(b.insert(250)); + assert!(b.insert(11)); + assert!(b.insert(200)); + + assert!(!a.par_is_subset(&b)); + assert!(!a.par_is_superset(&b)); + assert!(!b.par_is_subset(&a)); + assert!(!b.par_is_superset(&a)); + + assert!(b.insert(5)); + + assert!(a.par_is_subset(&b)); + assert!(!a.par_is_superset(&b)); + assert!(!b.par_is_subset(&a)); + assert!(b.par_is_superset(&a)); + } + + #[test] + fn test_iterate() { + let mut a = HashSet::new(); + for i in 0..32 { + assert!(a.insert(i)); + } + let observed = AtomicUsize::new(0); + a.par_iter().for_each(|k| { + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_intersection() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(11)); + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(77)); + assert!(a.insert(103)); + assert!(a.insert(5)); + assert!(a.insert(-5)); + + assert!(b.insert(2)); + assert!(b.insert(11)); + assert!(b.insert(77)); + assert!(b.insert(-9)); + assert!(b.insert(-42)); + assert!(b.insert(5)); + assert!(b.insert(3)); + + let expected = [3, 5, 11, 77]; + let i = a 
+ .par_intersection(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(3)); + assert!(b.insert(9)); + + let expected = [1, 5, 11]; + let i = a + .par_difference(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_symmetric_difference() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + + assert!(b.insert(-2)); + assert!(b.insert(3)); + assert!(b.insert(9)); + assert!(b.insert(14)); + assert!(b.insert(22)); + + let expected = [-2, 1, 5, 11, 14, 22]; + let i = a + .par_symmetric_difference(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_union() { + let mut a = HashSet::new(); + let mut b = HashSet::new(); + + assert!(a.insert(1)); + assert!(a.insert(3)); + assert!(a.insert(5)); + assert!(a.insert(9)); + assert!(a.insert(11)); + assert!(a.insert(16)); + assert!(a.insert(19)); + assert!(a.insert(24)); + + assert!(b.insert(-2)); + assert!(b.insert(1)); + assert!(b.insert(5)); + assert!(b.insert(9)); + assert!(b.insert(13)); + assert!(b.insert(19)); + + let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]; + let i = a + .par_union(&b) + .map(|x| { + assert!(expected.contains(x)); + 1 + }) + .sum::(); + assert_eq!(i, expected.len()); + } + + #[test] + fn test_from_iter() { + let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9]; + + let set: HashSet<_> = xs.par_iter().cloned().collect(); + + for x in &xs { + assert!(set.contains(x)); + } + } + + #[test] + fn test_move_iter() { + let hs = { + let mut hs = HashSet::new(); + + hs.insert('a'); + hs.insert('b'); + + hs + }; + + let v = hs.into_par_iter().collect::>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } + + #[test] + fn test_eq() { + // These constants once happened to expose a bug in insert(). + // I'm keeping them around to prevent a regression. + let mut s1 = HashSet::new(); + + s1.insert(1); + s1.insert(2); + s1.insert(3); + + let mut s2 = HashSet::new(); + + s2.insert(1); + s2.insert(2); + + assert!(!s1.par_eq(&s2)); + + s2.insert(3); + + assert!(s1.par_eq(&s2)); + } + + #[test] + fn test_extend_ref() { + let mut a = HashSet::new(); + a.insert(1); + + a.par_extend(&[2, 3, 4][..]); + + assert_eq!(a.len(), 4); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + + let mut b = HashSet::new(); + b.insert(5); + b.insert(6); + + a.par_extend(&b); + + assert_eq!(a.len(), 6); + assert!(a.contains(&1)); + assert!(a.contains(&2)); + assert!(a.contains(&3)); + assert!(a.contains(&4)); + assert!(a.contains(&5)); + assert!(a.contains(&6)); + } +} diff --git a/vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs b/vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs new file mode 100644 index 0000000..4d62dee --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/external_trait_impls/serde.rs @@ -0,0 +1,201 @@ +mod size_hint { + use core::cmp; + + /// This presumably exists to prevent denial of service attacks. + /// + /// Original discussion: https://github.com/serde-rs/serde/issues/1114. 
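+    ///
+    /// Capping an attacker-controlled size hint at 4096 entries bounds the
+    /// up-front allocation; the collection still grows as elements actually
+    /// arrive, so an oversized hint costs at most a few extra reallocations.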
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub(super) fn cautious(hint: Option<usize>) -> usize {
+        cmp::min(hint.unwrap_or(0), 4096)
+    }
+}
+
+mod map {
+    use core::fmt;
+    use core::hash::{BuildHasher, Hash};
+    use core::marker::PhantomData;
+    use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
+    use serde::ser::{Serialize, Serializer};
+
+    use crate::hash_map::HashMap;
+
+    use super::size_hint;
+
+    impl<K, V, H> Serialize for HashMap<K, V, H>
+    where
+        K: Serialize + Eq + Hash,
+        V: Serialize,
+        H: BuildHasher,
+    {
+        #[cfg_attr(feature = "inline-more", inline)]
+        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+        where
+            S: Serializer,
+        {
+            serializer.collect_map(self)
+        }
+    }
+
+    impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S>
+    where
+        K: Deserialize<'de> + Eq + Hash,
+        V: Deserialize<'de>,
+        S: BuildHasher + Default,
+    {
+        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+        where
+            D: Deserializer<'de>,
+        {
+            struct MapVisitor<K, V, S> {
+                marker: PhantomData<HashMap<K, V, S>>,
+            }
+
+            impl<'de, K, V, S> Visitor<'de> for MapVisitor<K, V, S>
+            where
+                K: Deserialize<'de> + Eq + Hash,
+                V: Deserialize<'de>,
+                S: BuildHasher + Default,
+            {
+                type Value = HashMap<K, V, S>;
+
+                fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+                    formatter.write_str("a map")
+                }
+
+                #[cfg_attr(feature = "inline-more", inline)]
+                fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+                where
+                    A: MapAccess<'de>,
+                {
+                    let mut values = HashMap::with_capacity_and_hasher(
+                        size_hint::cautious(map.size_hint()),
+                        S::default(),
+                    );
+
+                    while let Some((key, value)) = map.next_entry()? {
+                        values.insert(key, value);
+                    }
+
+                    Ok(values)
+                }
+            }
+
+            let visitor = MapVisitor {
+                marker: PhantomData,
+            };
+            deserializer.deserialize_map(visitor)
+        }
+    }
+}
+
+mod set {
+    use core::fmt;
+    use core::hash::{BuildHasher, Hash};
+    use core::marker::PhantomData;
+    use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
+    use serde::ser::{Serialize, Serializer};
+
+    use crate::hash_set::HashSet;
+
+    use super::size_hint;
+
+    impl<T, H> Serialize for HashSet<T, H>
+    where
+        T: Serialize + Eq + Hash,
+        H: BuildHasher,
+    {
+        #[cfg_attr(feature = "inline-more", inline)]
+        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+        where
+            S: Serializer,
+        {
+            serializer.collect_seq(self)
+        }
+    }
+
+    impl<'de, T, S> Deserialize<'de> for HashSet<T, S>
+    where
+        T: Deserialize<'de> + Eq + Hash,
+        S: BuildHasher + Default,
+    {
+        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+        where
+            D: Deserializer<'de>,
+        {
+            struct SeqVisitor<T, S> {
+                marker: PhantomData<HashSet<T, S>>,
+            }
+
+            impl<'de, T, S> Visitor<'de> for SeqVisitor<T, S>
+            where
+                T: Deserialize<'de> + Eq + Hash,
+                S: BuildHasher + Default,
+            {
+                type Value = HashSet<T, S>;
+
+                fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+                    formatter.write_str("a sequence")
+                }
+
+                #[cfg_attr(feature = "inline-more", inline)]
+                fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+                where
+                    A: SeqAccess<'de>,
+                {
+                    let mut values = HashSet::with_capacity_and_hasher(
+                        size_hint::cautious(seq.size_hint()),
+                        S::default(),
+                    );
+
+                    while let Some(value) = seq.next_element()?
{ + values.insert(value); + } + + Ok(values) + } + } + + let visitor = SeqVisitor { + marker: PhantomData, + }; + deserializer.deserialize_seq(visitor) + } + + #[allow(clippy::missing_errors_doc)] + fn deserialize_in_place(deserializer: D, place: &mut Self) -> Result<(), D::Error> + where + D: Deserializer<'de>, + { + struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet); + + impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S> + where + T: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Default, + { + type Value = (); + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("a sequence") + } + + #[cfg_attr(feature = "inline-more", inline)] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + self.0.clear(); + self.0.reserve(size_hint::cautious(seq.size_hint())); + + while let Some(value) = seq.next_element()? { + self.0.insert(value); + } + + Ok(()) + } + } + + deserializer.deserialize_seq(SeqInPlaceVisitor(place)) + } + } +} diff --git a/vendor/hashbrown-0.12.3/src/lib.rs b/vendor/hashbrown-0.12.3/src/lib.rs new file mode 100644 index 0000000..bc1c971 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/lib.rs @@ -0,0 +1,150 @@ +//! This crate is a Rust port of Google's high-performance [SwissTable] hash +//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap` +//! and `HashSet` types. +//! +//! The original C++ version of [SwissTable] can be found [here], and this +//! [CppCon talk] gives an overview of how the algorithm works. +//! +//! [SwissTable]: https://abseil.io/blog/20180927-swisstables +//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h +//! [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4 + +#![no_std] +#![cfg_attr( + feature = "nightly", + feature( + test, + core_intrinsics, + dropck_eyepatch, + min_specialization, + extend_one, + allocator_api, + slice_ptr_get, + nonnull_slice_from_raw_parts, + maybe_uninit_array_assume_init, + build_hasher_simple_hash_one + ) +)] +#![allow( + clippy::doc_markdown, + clippy::module_name_repetitions, + clippy::must_use_candidate, + clippy::option_if_let_else, + clippy::redundant_else, + clippy::manual_map, + clippy::missing_safety_doc, + clippy::missing_errors_doc +)] +#![warn(missing_docs)] +#![warn(rust_2018_idioms)] + +#[cfg(test)] +#[macro_use] +extern crate std; + +#[cfg_attr(test, macro_use)] +extern crate alloc; + +#[cfg(feature = "nightly")] +#[cfg(doctest)] +doc_comment::doctest!("../README.md"); + +#[macro_use] +mod macros; + +#[cfg(feature = "raw")] +/// Experimental and unsafe `RawTable` API. This module is only available if the +/// `raw` feature is enabled. +pub mod raw { + // The RawTable API is still experimental and is not properly documented yet. + #[allow(missing_docs)] + #[path = "mod.rs"] + mod inner; + pub use inner::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash maps. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::raw::*; + } +} +#[cfg(not(feature = "raw"))] +mod raw; + +mod external_trait_impls; +mod map; +#[cfg(feature = "rustc-internal-api")] +mod rustc_entry; +mod scopeguard; +mod set; + +pub mod hash_map { + //! A hash map implemented with quadratic probing and SIMD lookup. 
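+    //!
+    //! A quick usage sketch (the default hasher requires the `ahash`
+    //! feature, which is enabled by default):
+    //!
+    //! ```
+    //! use hashbrown::hash_map::HashMap;
+    //!
+    //! let mut map: HashMap<&str, i32> = HashMap::new();
+    //! map.insert("answer", 42);
+    //! assert_eq!(map.get("answer"), Some(&42));
+    //! ```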
+ pub use crate::map::*; + + #[cfg(feature = "rustc-internal-api")] + pub use crate::rustc_entry::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash maps. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::map::*; + } +} +pub mod hash_set { + //! A hash set implemented as a `HashMap` where the value is `()`. + pub use crate::set::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash sets. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::set::*; + } +} + +pub use crate::map::HashMap; +pub use crate::set::HashSet; + +/// The error type for `try_reserve` methods. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum TryReserveError { + /// Error due to the computed capacity exceeding the collection's maximum + /// (usually `isize::MAX` bytes). + CapacityOverflow, + + /// The memory allocator returned an error + AllocError { + /// The layout of the allocation request that failed. + layout: alloc::alloc::Layout, + }, +} + +/// Wrapper around `Bump` which allows it to be used as an allocator for +/// `HashMap`, `HashSet` and `RawTable`. +/// +/// `Bump` can be used directly without this wrapper on nightly if you enable +/// the `allocator-api` feature of the `bumpalo` crate. +#[cfg(feature = "bumpalo")] +#[derive(Clone, Copy, Debug)] +pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump); + +#[cfg(feature = "bumpalo")] +#[test] +fn test_bumpalo() { + use bumpalo::Bump; + let bump = Bump::new(); + let mut map = HashMap::new_in(BumpWrapper(&bump)); + map.insert(0, 1); +} diff --git a/vendor/hashbrown-0.12.3/src/macros.rs b/vendor/hashbrown-0.12.3/src/macros.rs new file mode 100644 index 0000000..f8ef917 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/macros.rs @@ -0,0 +1,70 @@ +// See the cfg-if crate. +#[allow(unused_macro_rules)] +macro_rules! cfg_if { + // match if/else chains with a final `else` + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + cfg_if! { + @__items + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + }; + + // match if/else chains lacking a final `else` + ( + if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + $( + else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + )* + ) => { + cfg_if! { + @__items + () ; + ( ($($i_met),*) ($($i_it)*) ), + $( ( ($($e_met),*) ($($e_it)*) ), )* + ( () () ), + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the negated cfgs in a list at the beginning and after the + // semicolon is all the remaining items + (@__items ($($not:meta,)*) ; ) => {}; + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { + // Emit all items within one block, applying an approprate #[cfg]. The + // #[cfg] will require all `$m` matchers specified and must also negate + // all previous matchers. + cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$m` matchers to the list of `$not` matchers as future emissions + // will have to negate everything we just matched as well. + cfg_if! 
{ @__items ($($not,)* $($m,)*) ; $($rest)* } + }; + + // Internal macro to Apply a cfg attribute to a list of items + (@__apply $m:meta, $($it:item)*) => { + $(#[$m] $it)* + }; +} + +// Helper macro for specialization. This also helps avoid parse errors if the +// default fn syntax for specialization changes in the future. +#[cfg(feature = "nightly")] +macro_rules! default_fn { + (#[$($a:tt)*] $($tt:tt)*) => { + #[$($a)*] default $($tt)* + } +} +#[cfg(not(feature = "nightly"))] +macro_rules! default_fn { + ($($tt:tt)*) => { + $($tt)* + } +} diff --git a/vendor/hashbrown-0.12.3/src/map.rs b/vendor/hashbrown-0.12.3/src/map.rs new file mode 100644 index 0000000..a5d3ccb --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/map.rs @@ -0,0 +1,8408 @@ +use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable}; +use crate::TryReserveError; +use core::borrow::Borrow; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash}; +use core::iter::{FromIterator, FusedIterator}; +use core::marker::PhantomData; +use core::mem; +use core::ops::Index; + +/// Default hasher for `HashMap`. +#[cfg(feature = "ahash")] +pub type DefaultHashBuilder = ahash::RandomState; + +/// Dummy default hasher for `HashMap`. +#[cfg(not(feature = "ahash"))] +pub enum DefaultHashBuilder {} + +/// A hash map implemented with quadratic probing and SIMD lookup. +/// +/// The default hashing algorithm is currently [`AHash`], though this is +/// subject to change at any point in the future. This hash function is very +/// fast for all types of keys, but this algorithm will typically *not* protect +/// against attacks such as HashDoS. +/// +/// The hashing algorithm can be replaced on a per-`HashMap` basis using the +/// [`default`], [`with_hasher`], and [`with_capacity_and_hasher`] methods. Many +/// alternative algorithms are available on crates.io, such as the [`fnv`] crate. +/// +/// It is required that the keys implement the [`Eq`] and [`Hash`] traits, although +/// this can frequently be achieved by using `#[derive(PartialEq, Eq, Hash)]`. +/// If you implement these yourself, it is important that the following +/// property holds: +/// +/// ```text +/// k1 == k2 -> hash(k1) == hash(k2) +/// ``` +/// +/// In other words, if two keys are equal, their hashes must be equal. +/// +/// It is a logic error for a key to be modified in such a way that the key's +/// hash, as determined by the [`Hash`] trait, or its equality, as determined by +/// the [`Eq`] trait, changes while it is in the map. This is normally only +/// possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. +/// +/// It is also a logic error for the [`Hash`] implementation of a key to panic. +/// This is generally only possible if the trait is implemented manually. If a +/// panic does occur then the contents of the `HashMap` may become corrupted and +/// some items may be dropped from the table. +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// // Type inference lets us omit an explicit type signature (which +/// // would be `HashMap` in this example). +/// let mut book_reviews = HashMap::new(); +/// +/// // Review some books. 
+/// book_reviews.insert( +/// "Adventures of Huckleberry Finn".to_string(), +/// "My favorite book.".to_string(), +/// ); +/// book_reviews.insert( +/// "Grimms' Fairy Tales".to_string(), +/// "Masterpiece.".to_string(), +/// ); +/// book_reviews.insert( +/// "Pride and Prejudice".to_string(), +/// "Very enjoyable.".to_string(), +/// ); +/// book_reviews.insert( +/// "The Adventures of Sherlock Holmes".to_string(), +/// "Eye lyked it alot.".to_string(), +/// ); +/// +/// // Check for a specific one. +/// // When collections store owned values (String), they can still be +/// // queried using references (&str). +/// if !book_reviews.contains_key("Les Misérables") { +/// println!("We've got {} reviews, but Les Misérables ain't one.", +/// book_reviews.len()); +/// } +/// +/// // oops, this review has a lot of spelling mistakes, let's delete it. +/// book_reviews.remove("The Adventures of Sherlock Holmes"); +/// +/// // Look up the values associated with some keys. +/// let to_find = ["Pride and Prejudice", "Alice's Adventure in Wonderland"]; +/// for &book in &to_find { +/// match book_reviews.get(book) { +/// Some(review) => println!("{}: {}", book, review), +/// None => println!("{} is unreviewed.", book) +/// } +/// } +/// +/// // Look up the value for a key (will panic if the key is not found). +/// println!("Review for Jane: {}", book_reviews["Pride and Prejudice"]); +/// +/// // Iterate over everything. +/// for (book, review) in &book_reviews { +/// println!("{}: \"{}\"", book, review); +/// } +/// ``` +/// +/// `HashMap` also implements an [`Entry API`](#method.entry), which allows +/// for more complex methods of getting, setting, updating and removing keys and +/// their values: +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// // type inference lets us omit an explicit type signature (which +/// // would be `HashMap<&str, u8>` in this example). +/// let mut player_stats = HashMap::new(); +/// +/// fn random_stat_buff() -> u8 { +/// // could actually return some random value here - let's just return +/// // some fixed value for now +/// 42 +/// } +/// +/// // insert a key only if it doesn't already exist +/// player_stats.entry("health").or_insert(100); +/// +/// // insert a key using a function that provides a new value only if it +/// // doesn't already exist +/// player_stats.entry("defence").or_insert_with(random_stat_buff); +/// +/// // update a key, guarding against the key possibly not being set +/// let stat = player_stats.entry("attack").or_insert(100); +/// *stat += random_stat_buff(); +/// ``` +/// +/// The easiest way to use `HashMap` with a custom key type is to derive [`Eq`] and [`Hash`]. +/// We must also derive [`PartialEq`]. +/// +/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html +/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html +/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html +/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html +/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html +/// [`default`]: #method.default +/// [`with_hasher`]: #method.with_hasher +/// [`with_capacity_and_hasher`]: #method.with_capacity_and_hasher +/// [`fnv`]: https://crates.io/crates/fnv +/// [`AHash`]: https://crates.io/crates/ahash +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// #[derive(Hash, Eq, PartialEq, Debug)] +/// struct Viking { +/// name: String, +/// country: String, +/// } +/// +/// impl Viking { +/// /// Creates a new Viking. 
+/// fn new(name: &str, country: &str) -> Viking { +/// Viking { name: name.to_string(), country: country.to_string() } +/// } +/// } +/// +/// // Use a HashMap to store the vikings' health points. +/// let mut vikings = HashMap::new(); +/// +/// vikings.insert(Viking::new("Einar", "Norway"), 25); +/// vikings.insert(Viking::new("Olaf", "Denmark"), 24); +/// vikings.insert(Viking::new("Harald", "Iceland"), 12); +/// +/// // Use derived implementation to print the status of the vikings. +/// for (viking, health) in &vikings { +/// println!("{:?} has {} hp", viking, health); +/// } +/// ``` +/// +/// A `HashMap` with fixed list of elements can be initialized from an array: +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let timber_resources: HashMap<&str, i32> = [("Norway", 100), ("Denmark", 50), ("Iceland", 10)] +/// .iter().cloned().collect(); +/// // use the values stored in map +/// ``` +pub struct HashMap { + pub(crate) hash_builder: S, + pub(crate) table: RawTable<(K, V), A>, +} + +impl Clone for HashMap { + fn clone(&self) -> Self { + HashMap { + hash_builder: self.hash_builder.clone(), + table: self.table.clone(), + } + } + + fn clone_from(&mut self, source: &Self) { + self.table.clone_from(&source.table); + + // Update hash_builder only if we successfully cloned all elements. + self.hash_builder.clone_from(&source.hash_builder); + } +} + +/// Ensures that a single closure type across uses of this which, in turn prevents multiple +/// instances of any functions like RawTable::reserve from being generated +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hasher(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_ +where + K: Borrow, + Q: Hash, + S: BuildHasher, +{ + move |val| make_hash::(hash_builder, &val.0) +} + +/// Ensures that a single closure type across uses of this which, in turn prevents multiple +/// instances of any functions like RawTable::reserve from being generated +#[cfg_attr(feature = "inline-more", inline)] +fn equivalent_key(k: &Q) -> impl Fn(&(K, V)) -> bool + '_ +where + K: Borrow, + Q: ?Sized + Eq, +{ + move |x| k.eq(x.0.borrow()) +} + +/// Ensures that a single closure type across uses of this which, in turn prevents multiple +/// instances of any functions like RawTable::reserve from being generated +#[cfg_attr(feature = "inline-more", inline)] +fn equivalent(k: &Q) -> impl Fn(&K) -> bool + '_ +where + K: Borrow, + Q: ?Sized + Eq, +{ + move |x| k.eq(x.borrow()) +} + +#[cfg(not(feature = "nightly"))] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 +where + K: Borrow, + Q: Hash + ?Sized, + S: BuildHasher, +{ + use core::hash::Hasher; + let mut state = hash_builder.build_hasher(); + val.hash(&mut state); + state.finish() +} + +#[cfg(feature = "nightly")] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_hash(hash_builder: &S, val: &Q) -> u64 +where + K: Borrow, + Q: Hash + ?Sized, + S: BuildHasher, +{ + hash_builder.hash_one(val) +} + +#[cfg(not(feature = "nightly"))] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_insert_hash(hash_builder: &S, val: &K) -> u64 +where + K: Hash, + S: BuildHasher, +{ + use core::hash::Hasher; + let mut state = hash_builder.build_hasher(); + val.hash(&mut state); + state.finish() +} + +#[cfg(feature = "nightly")] +#[cfg_attr(feature = "inline-more", inline)] +pub(crate) fn make_insert_hash(hash_builder: &S, val: &K) -> u64 +where + K: Hash, + S: BuildHasher, +{ + hash_builder.hash_one(val) +} + +#[cfg(feature = "ahash")] 
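+// Note: `new` and `with_capacity` below are gated on the `ahash` feature
+// because they require `DefaultHashBuilder: Default`; without `ahash`,
+// `DefaultHashBuilder` is an uninhabited placeholder enum, so maps must be
+// constructed via `with_hasher` or `with_capacity_and_hasher` instead.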
+impl HashMap { + /// Creates an empty `HashMap`. + /// + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// assert_eq!(map.len(), 0); + /// assert_eq!(map.capacity(), 0); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn new() -> Self { + Self::default() + } + + /// Creates an empty `HashMap` with the specified capacity. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::with_capacity(10); + /// assert_eq!(map.len(), 0); + /// assert!(map.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, DefaultHashBuilder::default()) + } +} + +#[cfg(feature = "ahash")] +impl HashMap { + /// Creates an empty `HashMap` using the given allocator. + /// + /// The hash map is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + #[cfg_attr(feature = "inline-more", inline)] + pub fn new_in(alloc: A) -> Self { + Self::with_hasher_in(DefaultHashBuilder::default(), alloc) + } + + /// Creates an empty `HashMap` with the specified capacity using the given allocator. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc) + } +} + +impl HashMap { + /// Creates an empty `HashMap` which will use the given hash builder to hash + /// keys. + /// + /// The hash map is initially created with a capacity of 0, so it will not + /// allocate until it is first inserted into. + /// + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashMap to be useful, see its documentation for details. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_hasher(s); + /// assert_eq!(map.len(), 0); + /// assert_eq!(map.capacity(), 0); + /// + /// map.insert(1, 2); + /// ``` + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + #[cfg_attr(feature = "inline-more", inline)] + pub const fn with_hasher(hash_builder: S) -> Self { + Self { + hash_builder, + table: RawTable::new(), + } + } + + /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` + /// to hash the keys. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. 
+ /// + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// The `hash_builder` passed should implement the [`BuildHasher`] trait for + /// the HashMap to be useful, see its documentation for details. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_capacity_and_hasher(10, s); + /// assert_eq!(map.len(), 0); + /// assert!(map.capacity() >= 10); + /// + /// map.insert(1, 2); + /// ``` + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self { + Self { + hash_builder, + table: RawTable::with_capacity(capacity), + } + } +} + +impl HashMap { + /// Returns a reference to the underlying allocator. + #[inline] + pub fn allocator(&self) -> &A { + self.table.allocator() + } + + /// Creates an empty `HashMap` which will use the given hash builder to hash + /// keys. It will be allocated with the given allocator. + /// + /// The created map has the default initial capacity. + /// + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_hasher(s); + /// map.insert(1, 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_hasher_in(hash_builder: S, alloc: A) -> Self { + Self { + hash_builder, + table: RawTable::new_in(alloc), + } + } + + /// Creates an empty `HashMap` with the specified capacity, using `hash_builder` + /// to hash the keys. It will be allocated with the given allocator. + /// + /// The hash map will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash map will not allocate. + /// + /// Warning: `hash_builder` is normally randomly generated, and + /// is designed to allow HashMaps to be resistant to attacks that + /// cause many collisions and very poor performance. Setting it + /// manually using this function can expose a DoS attack vector. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let s = DefaultHashBuilder::default(); + /// let mut map = HashMap::with_capacity_and_hasher(10, s); + /// map.insert(1, 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn with_capacity_and_hasher_in(capacity: usize, hash_builder: S, alloc: A) -> Self { + Self { + hash_builder, + table: RawTable::with_capacity_in(capacity, alloc), + } + } + + /// Returns a reference to the map's [`BuildHasher`]. 
+ /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::DefaultHashBuilder; + /// + /// let hasher = DefaultHashBuilder::default(); + /// let map: HashMap = HashMap::with_hasher(hasher); + /// let hasher: &DefaultHashBuilder = map.hasher(); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn hasher(&self) -> &S { + &self.hash_builder + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the `HashMap` might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let map: HashMap = HashMap::with_capacity(100); + /// assert_eq!(map.len(), 0); + /// assert!(map.capacity() >= 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn capacity(&self) -> usize { + self.table.capacity() + } + + /// An iterator visiting all keys in arbitrary order. + /// The iterator element type is `&'a K`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec<&str> = Vec::new(); + /// + /// for key in map.keys() { + /// println!("{}", key); + /// vec.push(*key); + /// } + /// + /// // The `Keys` iterator produces keys in arbitrary order, so the + /// // keys must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, ["a", "b", "c"]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn keys(&self) -> Keys<'_, K, V> { + Keys { inner: self.iter() } + } + + /// An iterator visiting all values in arbitrary order. + /// The iterator element type is `&'a V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec = Vec::new(); + /// + /// for val in map.values() { + /// println!("{}", val); + /// vec.push(*val); + /// } + /// + /// // The `Values` iterator produces values in arbitrary order, so the + /// // values must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [1, 2, 3]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn values(&self) -> Values<'_, K, V> { + Values { inner: self.iter() } + } + + /// An iterator visiting all values mutably in arbitrary order. + /// The iterator element type is `&'a mut V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// for val in map.values_mut() { + /// *val = *val + 10; + /// } + /// + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec = Vec::new(); + /// + /// for val in map.values() { + /// println!("{}", val); + /// vec.push(*val); + /// } + /// + /// // The `Values` iterator produces values in arbitrary order, so the + /// // values must be sorted to test them against a sorted array. 
+ /// vec.sort_unstable(); + /// assert_eq!(vec, [11, 12, 13]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { + ValuesMut { + inner: self.iter_mut(), + } + } + + /// An iterator visiting all key-value pairs in arbitrary order. + /// The iterator element type is `(&'a K, &'a V)`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec<(&str, i32)> = Vec::new(); + /// + /// for (key, val) in map.iter() { + /// println!("key: {} val: {}", key, val); + /// vec.push((*key, *val)); + /// } + /// + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3)]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> Iter<'_, K, V> { + // Here we tie the lifetime of self to the iter. + unsafe { + Iter { + inner: self.table.iter(), + marker: PhantomData, + } + } + } + + /// An iterator visiting all key-value pairs in arbitrary order, + /// with mutable references to the values. + /// The iterator element type is `(&'a K, &'a mut V)`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// // Update all values + /// for (_, val) in map.iter_mut() { + /// *val *= 2; + /// } + /// + /// assert_eq!(map.len(), 3); + /// let mut vec: Vec<(&str, i32)> = Vec::new(); + /// + /// for (key, val) in &map { + /// println!("key: {} val: {}", key, val); + /// vec.push((*key, *val)); + /// } + /// + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [("a", 2), ("b", 4), ("c", 6)]); + /// + /// assert_eq!(map.len(), 3); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + // Here we tie the lifetime of self to the iter. + unsafe { + IterMut { + inner: self.table.iter(), + marker: PhantomData, + } + } + } + + #[cfg(test)] + #[cfg_attr(feature = "inline-more", inline)] + fn raw_capacity(&self) -> usize { + self.table.buckets() + } + + /// Returns the number of elements in the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// assert_eq!(a.len(), 0); + /// a.insert(1, "a"); + /// assert_eq!(a.len(), 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn len(&self) -> usize { + self.table.len() + } + + /// Returns `true` if the map contains no elements. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// assert!(a.is_empty()); + /// a.insert(1, "a"); + /// assert!(!a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Clears the map, returning all key-value pairs as an iterator. Keeps the + /// allocated memory for reuse. + /// + /// If the returned iterator is dropped before being fully consumed, it + /// drops the remaining key-value pairs. 
The returned iterator keeps a + /// mutable borrow on the vector to optimize its implementation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// let capacity_before_drain = a.capacity(); + /// + /// for (k, v) in a.drain().take(1) { + /// assert!(k == 1 || k == 2); + /// assert!(v == "a" || v == "b"); + /// } + /// + /// // As we can see, the map is empty and contains no element. + /// assert!(a.is_empty() && a.len() == 0); + /// // But map capacity is equal to old one. + /// assert_eq!(a.capacity(), capacity_before_drain); + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// a.insert(2, "b"); + /// + /// { // Iterator is dropped without being consumed. + /// let d = a.drain(); + /// } + /// + /// // But the map is empty even if we do not use Drain iterator. + /// assert!(a.is_empty()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> Drain<'_, K, V, A> { + Drain { + inner: self.table.drain(), + } + } + + /// Retains only the elements specified by the predicate. Keeps the + /// allocated memory for reuse. + /// + /// In other words, remove all pairs `(k, v)` such that `f(&k, &mut v)` returns `false`. + /// The elements are visited in unsorted (and unspecified) order. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = (0..8).map(|x|(x, x*10)).collect(); + /// assert_eq!(map.len(), 8); + /// let capacity_before_retain = map.capacity(); + /// + /// map.retain(|&k, _| k % 2 == 0); + /// + /// // We can see, that the number of elements inside map is changed. + /// assert_eq!(map.len(), 4); + /// // But map capacity is equal to old one. + /// assert_eq!(map.capacity(), capacity_before_retain); + /// + /// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect(); + /// vec.sort_unstable(); + /// assert_eq!(vec, [(0, 0), (2, 20), (4, 40), (6, 60)]); + /// ``` + pub fn retain(&mut self, mut f: F) + where + F: FnMut(&K, &mut V) -> bool, + { + // Here we only use `iter` as a temporary, preventing use-after-free + unsafe { + for item in self.table.iter() { + let &mut (ref key, ref mut value) = item.as_mut(); + if !f(key, value) { + self.table.erase(item); + } + } + } + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out + /// into another iterator. + /// + /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of + /// whether you choose to keep or remove it. + /// + /// When the returned DrainedFilter is dropped, any remaining elements that satisfy + /// the predicate are dropped from the table. + /// + /// It is unspecified how many more elements will be subjected to the closure + /// if a panic occurs in the closure, or a panic occurs while dropping an element, + /// or if the `DrainFilter` value is leaked. + /// + /// Keeps the allocated memory for reuse. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); + /// let capacity_before_drain_filter = map.capacity(); + /// let drained: HashMap = map.drain_filter(|k, _v| k % 2 == 0).collect(); + /// + /// let mut evens = drained.keys().cloned().collect::>(); + /// let mut odds = map.keys().cloned().collect::>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// // Map capacity is equal to old one. + /// assert_eq!(map.capacity(), capacity_before_drain_filter); + /// + /// let mut map: HashMap = (0..8).map(|x| (x, x)).collect(); + /// + /// { // Iterator is dropped without being consumed. + /// let d = map.drain_filter(|k, _v| k % 2 != 0); + /// } + /// + /// // But the map lens have been reduced by half + /// // even if we do not use DrainFilter iterator. + /// assert_eq!(map.len(), 4); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain_filter(&mut self, f: F) -> DrainFilter<'_, K, V, F, A> + where + F: FnMut(&K, &mut V) -> bool, + { + DrainFilter { + f, + inner: DrainFilterInner { + iter: unsafe { self.table.iter() }, + table: &mut self.table, + }, + } + } + + /// Clears the map, removing all key-value pairs. Keeps the allocated memory + /// for reuse. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut a = HashMap::new(); + /// a.insert(1, "a"); + /// let capacity_before_clear = a.capacity(); + /// + /// a.clear(); + /// + /// // Map is empty. + /// assert!(a.is_empty()); + /// // But map capacity is equal to old one. + /// assert_eq!(a.capacity(), capacity_before_clear); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + self.table.clear(); + } + + /// Creates a consuming iterator visiting all the keys in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `K`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// let mut vec: Vec<&str> = map.into_keys().collect(); + /// + /// // The `IntoKeys` iterator produces keys in arbitrary order, so the + /// // keys must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, ["a", "b", "c"]); + /// ``` + #[inline] + pub fn into_keys(self) -> IntoKeys { + IntoKeys { + inner: self.into_iter(), + } + } + + /// Creates a consuming iterator visiting all the values in arbitrary order. + /// The map cannot be used after calling this. + /// The iterator element type is `V`. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert("a", 1); + /// map.insert("b", 2); + /// map.insert("c", 3); + /// + /// let mut vec: Vec = map.into_values().collect(); + /// + /// // The `IntoValues` iterator produces values in arbitrary order, so + /// // the values must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [1, 2, 3]); + /// ``` + #[inline] + pub fn into_values(self) -> IntoValues { + IntoValues { + inner: self.into_iter(), + } + } +} + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, + A: Allocator + Clone, +{ + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashMap`. 
The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new allocation size overflows [`usize`]. + /// + /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// // Map is empty and doesn't allocate memory + /// assert_eq!(map.capacity(), 0); + /// + /// map.reserve(10); + /// + /// // And now map can hold at least 10 elements + /// assert!(map.capacity() >= 10); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize) { + self.table + .reserve(additional, make_hasher::(&self.hash_builder)); + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashMap`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, isize> = HashMap::new(); + /// // Map is empty and doesn't allocate memory + /// assert_eq!(map.capacity(), 0); + /// + /// map.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?"); + /// + /// // And now map can hold at least 10 elements + /// assert!(map.capacity() >= 10); + /// ``` + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned: + /// ``` + /// # fn test() { + /// use hashbrown::HashMap; + /// use hashbrown::TryReserveError; + /// let mut map: HashMap = HashMap::new(); + /// + /// match map.try_reserve(usize::MAX) { + /// Err(error) => match error { + /// TryReserveError::CapacityOverflow => {} + /// _ => panic!("TryReserveError::AllocError ?"), + /// }, + /// _ => panic!(), + /// } + /// # } + /// # fn main() { + /// # #[cfg(not(miri))] + /// # test() + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + self.table + .try_reserve(additional, make_hasher::(&self.hash_builder)) + } + + /// Shrinks the capacity of the map as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::with_capacity(100); + /// map.insert(1, 2); + /// map.insert(3, 4); + /// assert!(map.capacity() >= 100); + /// map.shrink_to_fit(); + /// assert!(map.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to_fit(&mut self) { + self.table + .shrink_to(0, make_hasher::(&self.hash_builder)); + } + + /// Shrinks the capacity of the map with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// This function does nothing if the current capacity is smaller than the + /// supplied minimum capacity. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap = HashMap::with_capacity(100); + /// map.insert(1, 2); + /// map.insert(3, 4); + /// assert!(map.capacity() >= 100); + /// map.shrink_to(10); + /// assert!(map.capacity() >= 10); + /// map.shrink_to(0); + /// assert!(map.capacity() >= 2); + /// map.shrink_to(10); + /// assert!(map.capacity() >= 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.table + .shrink_to(min_capacity, make_hasher::(&self.hash_builder)); + } + + /// Gets the given key's corresponding entry in the map for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut letters = HashMap::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// let counter = letters.entry(ch).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(letters[&'s'], 2); + /// assert_eq!(letters[&'t'], 3); + /// assert_eq!(letters[&'u'], 1); + /// assert_eq!(letters.get(&'y'), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> { + let hash = make_insert_hash::(&self.hash_builder, &key); + if let Some(elem) = self.table.find(hash, equivalent_key(&key)) { + Entry::Occupied(OccupiedEntry { + hash, + key: Some(key), + elem, + table: self, + }) + } else { + Entry::Vacant(VacantEntry { + hash, + key, + table: self, + }) + } + } + + /// Gets the given key's corresponding entry by reference in the map for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut words: HashMap = HashMap::new(); + /// let source = ["poneyland", "horseyland", "poneyland", "poneyland"]; + /// for (i, &s) in source.iter().enumerate() { + /// let counter = words.entry_ref(s).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(words["poneyland"], 3); + /// assert_eq!(words["horseyland"], 1); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A> + where + K: Borrow, + Q: Hash + Eq, + { + let hash = make_hash::(&self.hash_builder, key); + if let Some(elem) = self.table.find(hash, equivalent_key(key)) { + EntryRef::Occupied(OccupiedEntryRef { + hash, + key: Some(KeyOrRef::Borrowed(key)), + elem, + table: self, + }) + } else { + EntryRef::Vacant(VacantEntryRef { + hash, + key: KeyOrRef::Borrowed(key), + table: self, + }) + } + } + + /// Returns a reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get(&1), Some(&"a")); + /// assert_eq!(map.get(&2), None); + /// ``` + #[inline] + pub fn get(&self, k: &Q) -> Option<&V> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner(k) { + Some(&(_, ref v)) => Some(v), + None => None, + } + } + + /// Returns the key-value pair corresponding to the supplied key. 
+ /// + /// The supplied key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.get_key_value(&1), Some((&1, &"a"))); + /// assert_eq!(map.get_key_value(&2), None); + /// ``` + #[inline] + pub fn get_key_value(&self, k: &Q) -> Option<(&K, &V)> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner(k) { + Some(&(ref key, ref value)) => Some((key, value)), + None => None, + } + } + + #[inline] + fn get_inner(&self, k: &Q) -> Option<&(K, V)> + where + K: Borrow, + Q: Hash + Eq, + { + if self.table.is_empty() { + None + } else { + let hash = make_hash::(&self.hash_builder, k); + self.table.get(hash, equivalent_key(k)) + } + } + + /// Returns the key-value pair corresponding to the supplied key, with a mutable reference to value. + /// + /// The supplied key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// let (k, v) = map.get_key_value_mut(&1).unwrap(); + /// assert_eq!(k, &1); + /// assert_eq!(v, &mut "a"); + /// *v = "b"; + /// assert_eq!(map.get_key_value_mut(&1), Some((&1, &mut "b"))); + /// assert_eq!(map.get_key_value_mut(&2), None); + /// ``` + #[inline] + pub fn get_key_value_mut(&mut self, k: &Q) -> Option<(&K, &mut V)> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner_mut(k) { + Some(&mut (ref key, ref mut value)) => Some((key, value)), + None => None, + } + } + + /// Returns `true` if the map contains a value for the specified key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// assert_eq!(map.contains_key(&1), true); + /// assert_eq!(map.contains_key(&2), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn contains_key(&self, k: &Q) -> bool + where + K: Borrow, + Q: Hash + Eq, + { + self.get_inner(k).is_some() + } + + /// Returns a mutable reference to the value corresponding to the key. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. 
+ /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// map.insert(1, "a"); + /// if let Some(x) = map.get_mut(&1) { + /// *x = "b"; + /// } + /// assert_eq!(map[&1], "b"); + /// + /// assert_eq!(map.get_mut(&2), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self, k: &Q) -> Option<&mut V> + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.get_inner_mut(k) { + Some(&mut (_, ref mut v)) => Some(v), + None => None, + } + } + + #[inline] + fn get_inner_mut(&mut self, k: &Q) -> Option<&mut (K, V)> + where + K: Borrow, + Q: Hash + Eq, + { + if self.table.is_empty() { + None + } else { + let hash = make_hash::(&self.hash_builder, k); + self.table.get_mut(hash, equivalent_key(k)) + } + } + + /// Attempts to get mutable references to `N` values in the map at once. + /// + /// Returns an array of length `N` with the results of each query. For soundness, at most one + /// mutable reference will be returned to any value. `None` will be returned if any of the + /// keys are duplicates or missing. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "Library of Congress", + /// ]); + /// assert_eq!( + /// got, + /// Some([ + /// &mut 1807, + /// &mut 1800, + /// ]), + /// ); + /// + /// // Missing keys result in None + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "New York Public Library", + /// ]); + /// assert_eq!(got, None); + /// + /// // Duplicate keys result in None + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "Athenæum", + /// ]); + /// assert_eq!(got, None); + /// ``` + pub fn get_many_mut(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]> + where + K: Borrow, + Q: Hash + Eq, + { + self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v)) + } + + /// Attempts to get mutable references to `N` values in the map at once, without validating that + /// the values are unique. + /// + /// Returns an array of length `N` with the results of each query. `None` will be returned if + /// any of the keys are missing. + /// + /// For a safe alternative see [`get_many_mut`](`HashMap::get_many_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. 
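+ ///
+ /// In other words, the caller must guarantee that the keys in `ks` are
+ /// pairwise distinct; unlike `get_many_mut`, no duplicate check is performed.
+ /// A minimal sketch of a sound call, with keys that are statically known to
+ /// be distinct:
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.insert("a", 1);
+ /// map.insert("b", 2);
+ ///
+ /// // SAFETY: "a" and "b" are distinct keys, so no two returned
+ /// // references can alias.
+ /// let got = unsafe { map.get_many_unchecked_mut(["a", "b"]) };
+ /// assert_eq!(got, Some([&mut 1, &mut 2]));
+ /// ```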
+ /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "Library of Congress", + /// ]); + /// assert_eq!( + /// got, + /// Some([ + /// &mut 1807, + /// &mut 1800, + /// ]), + /// ); + /// + /// // Missing keys result in None + /// let got = libraries.get_many_mut([ + /// "Athenæum", + /// "New York Public Library", + /// ]); + /// assert_eq!(got, None); + /// ``` + pub unsafe fn get_many_unchecked_mut( + &mut self, + ks: [&Q; N], + ) -> Option<[&'_ mut V; N]> + where + K: Borrow, + Q: Hash + Eq, + { + self.get_many_unchecked_mut_inner(ks) + .map(|res| res.map(|(_, v)| v)) + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys. + /// + /// Returns an array of length `N` with the results of each query. For soundness, at most one + /// mutable reference will be returned to any value. `None` will be returned if any of the keys + /// are duplicates or missing. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// ]); + /// assert_eq!( + /// got, + /// Some([ + /// (&"Bodleian Library".to_string(), &mut 1602), + /// (&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691), + /// ]), + /// ); + /// // Missing keys result in None + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Gewandhaus", + /// ]); + /// assert_eq!(got, None); + /// + /// // Duplicate keys result in None + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// ]); + /// assert_eq!(got, None); + /// ``` + pub fn get_many_key_value_mut( + &mut self, + ks: [&Q; N], + ) -> Option<[(&'_ K, &'_ mut V); N]> + where + K: Borrow, + Q: Hash + Eq, + { + self.get_many_mut_inner(ks) + .map(|res| res.map(|(k, v)| (&*k, v))) + } + + /// Attempts to get mutable references to `N` values in the map at once, with immutable + /// references to the corresponding keys, without validating that the values are unique. + /// + /// Returns an array of length `N` with the results of each query. `None` will be returned if + /// any of the keys are missing. + /// + /// For a safe alternative see [`get_many_key_value_mut`](`HashMap::get_many_key_value_mut`). + /// + /// # Safety + /// + /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting + /// references are not used. 
+ /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut libraries = HashMap::new(); + /// libraries.insert("Bodleian Library".to_string(), 1602); + /// libraries.insert("Athenæum".to_string(), 1807); + /// libraries.insert("Herzogin-Anna-Amalia-Bibliothek".to_string(), 1691); + /// libraries.insert("Library of Congress".to_string(), 1800); + /// + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Herzogin-Anna-Amalia-Bibliothek", + /// ]); + /// assert_eq!( + /// got, + /// Some([ + /// (&"Bodleian Library".to_string(), &mut 1602), + /// (&"Herzogin-Anna-Amalia-Bibliothek".to_string(), &mut 1691), + /// ]), + /// ); + /// // Missing keys result in None + /// let got = libraries.get_many_key_value_mut([ + /// "Bodleian Library", + /// "Gewandhaus", + /// ]); + /// assert_eq!(got, None); + /// ``` + pub unsafe fn get_many_key_value_unchecked_mut( + &mut self, + ks: [&Q; N], + ) -> Option<[(&'_ K, &'_ mut V); N]> + where + K: Borrow, + Q: Hash + Eq, + { + self.get_many_unchecked_mut_inner(ks) + .map(|res| res.map(|(k, v)| (&*k, v))) + } + + fn get_many_mut_inner( + &mut self, + ks: [&Q; N], + ) -> Option<[&'_ mut (K, V); N]> + where + K: Borrow, + Q: Hash + Eq, + { + let hashes = self.build_hashes_inner(ks); + self.table + .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) + } + + unsafe fn get_many_unchecked_mut_inner( + &mut self, + ks: [&Q; N], + ) -> Option<[&'_ mut (K, V); N]> + where + K: Borrow, + Q: Hash + Eq, + { + let hashes = self.build_hashes_inner(ks); + self.table + .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow())) + } + + fn build_hashes_inner(&self, ks: [&Q; N]) -> [u64; N] + where + K: Borrow, + Q: Hash + Eq, + { + let mut hashes = [0_u64; N]; + for i in 0..N { + hashes[i] = make_hash::(&self.hash_builder, ks[i]); + } + hashes + } + + /// Inserts a key-value pair into the map. + /// + /// If the map did not have this key present, [`None`] is returned. + /// + /// If the map did have this key present, the value is updated, and the old + /// value is returned. The key is not updated, though; this matters for + /// types that can be `==` without being identical. See the [`std::collections`] + /// [module-level documentation] for more. + /// + /// [`None`]: https://doc.rust-lang.org/std/option/enum.Option.html#variant.None + /// [`std::collections`]: https://doc.rust-lang.org/std/collections/index.html + /// [module-level documentation]: https://doc.rust-lang.org/std/collections/index.html#insert-and-complex-keys + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// assert_eq!(map.insert(37, "a"), None); + /// assert_eq!(map.is_empty(), false); + /// + /// map.insert(37, "b"); + /// assert_eq!(map.insert(37, "c"), Some("b")); + /// assert_eq!(map[&37], "c"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, k: K, v: V) -> Option { + let hash = make_insert_hash::(&self.hash_builder, &k); + if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) { + Some(mem::replace(item, v)) + } else { + self.table + .insert(hash, (k, v), make_hasher::(&self.hash_builder)); + None + } + } + + /// Insert a key-value pair into the map without checking + /// if the key already exists in the map. + /// + /// Returns a reference to the key and value just inserted. 
+ /// + /// This operation is safe if a key does not exist in the map. + /// + /// However, if a key exists in the map already, the behavior is unspecified: + /// this operation may panic, loop forever, or any following operation with the map + /// may panic, loop forever or return arbitrary result. + /// + /// That said, this operation (and following operations) are guaranteed to + /// not violate memory safety. + /// + /// This operation is faster than regular insert, because it does not perform + /// lookup before insertion. + /// + /// This operation is useful during initial population of the map. + /// For example, when constructing a map from another map, we know + /// that keys are unique. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map1 = HashMap::new(); + /// assert_eq!(map1.insert(1, "a"), None); + /// assert_eq!(map1.insert(2, "b"), None); + /// assert_eq!(map1.insert(3, "c"), None); + /// assert_eq!(map1.len(), 3); + /// + /// let mut map2 = HashMap::new(); + /// + /// for (key, value) in map1.into_iter() { + /// map2.insert_unique_unchecked(key, value); + /// } + /// + /// let (key, value) = map2.insert_unique_unchecked(4, "d"); + /// assert_eq!(key, &4); + /// assert_eq!(value, &mut "d"); + /// *value = "e"; + /// + /// assert_eq!(map2[&1], "a"); + /// assert_eq!(map2[&2], "b"); + /// assert_eq!(map2[&3], "c"); + /// assert_eq!(map2[&4], "e"); + /// assert_eq!(map2.len(), 4); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) { + let hash = make_insert_hash::(&self.hash_builder, &k); + let bucket = self + .table + .insert(hash, (k, v), make_hasher::(&self.hash_builder)); + let (k_ref, v_ref) = unsafe { bucket.as_mut() }; + (k_ref, v_ref) + } + + /// Tries to insert a key-value pair into the map, and returns + /// a mutable reference to the value in the entry. + /// + /// # Errors + /// + /// If the map already had this key present, nothing is updated, and + /// an error containing the occupied entry and the value is returned. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::OccupiedError; + /// + /// let mut map = HashMap::new(); + /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a"); + /// + /// match map.try_insert(37, "b") { + /// Err(OccupiedError { entry, value }) => { + /// assert_eq!(entry.key(), &37); + /// assert_eq!(entry.get(), &"a"); + /// assert_eq!(value, "b"); + /// } + /// _ => panic!() + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_insert( + &mut self, + key: K, + value: V, + ) -> Result<&mut V, OccupiedError<'_, K, V, S, A>> { + match self.entry(key) { + Entry::Occupied(entry) => Err(OccupiedError { entry, value }), + Entry::Vacant(entry) => Ok(entry.insert(value)), + } + } + + /// Removes a key from the map, returning the value at the key if the key + /// was previously in the map. Keeps the allocated memory for reuse. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. 
+ /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// // The map is empty + /// assert!(map.is_empty() && map.capacity() == 0); + /// + /// map.insert(1, "a"); + /// let capacity_before_remove = map.capacity(); + /// + /// assert_eq!(map.remove(&1), Some("a")); + /// assert_eq!(map.remove(&1), None); + /// + /// // Now map holds none elements but capacity is equal to the old one + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(&mut self, k: &Q) -> Option + where + K: Borrow, + Q: Hash + Eq, + { + // Avoid `Option::map` because it bloats LLVM IR. + match self.remove_entry(k) { + Some((_, v)) => Some(v), + None => None, + } + } + + /// Removes a key from the map, returning the stored key and value if the + /// key was previously in the map. Keeps the allocated memory for reuse. + /// + /// The key may be any borrowed form of the map's key type, but + /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for + /// the key type. + /// + /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html + /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map = HashMap::new(); + /// // The map is empty + /// assert!(map.is_empty() && map.capacity() == 0); + /// + /// map.insert(1, "a"); + /// let capacity_before_remove = map.capacity(); + /// + /// assert_eq!(map.remove_entry(&1), Some((1, "a"))); + /// assert_eq!(map.remove(&1), None); + /// + /// // Now map hold none elements but capacity is equal to the old one + /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, k: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq, + { + let hash = make_hash::(&self.hash_builder, k); + self.table.remove_entry(hash, equivalent_key(k)) + } +} + +impl HashMap { + /// Creates a raw entry builder for the HashMap. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. After this, insertions into a vacant entry + /// still require an owned key to be provided. + /// + /// Raw entries are useful for such exotic situations as: + /// + /// * Hash memoization + /// * Deferring the creation of an owned key until it is known to be required + /// * Using a search key that doesn't work with the Borrow trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Because raw entries provide much more low-level control, it's much easier + /// to put the HashMap into an inconsistent state which, while memory-safe, + /// will cause the map to produce seemingly random results. Higher-level and + /// more foolproof APIs like `entry` should be preferred when possible. + /// + /// In particular, the hash used to initialized the raw entry must still be + /// consistent with the hash of the key that is ultimately stored in the entry. + /// This is because implementations of HashMap may need to recompute hashes + /// when resizing, at which point only the keys are available. + /// + /// Raw entries give mutable access to the keys. 
This must not be used + /// to modify how the key would compare or hash, as the map will not re-evaluate + /// where the key should go, meaning the keys may become "lost" if their + /// location does not reflect their state. For instance, if you change a key + /// so that the map now contains keys which compare equal, search may start + /// acting erratically, with two keys randomly masking each other. Implementations + /// are free to assume this doesn't happen (within the limits of memory-safety). + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map = HashMap::new(); + /// map.extend([("a", 100), ("b", 200), ("c", 300)]); + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// // Existing key (insert and update) + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(mut view) => { + /// assert_eq!(view.get(), &100); + /// let v = view.get_mut(); + /// let new_v = (*v) * 10; + /// *v = new_v; + /// assert_eq!(view.insert(1111), 1000); + /// } + /// } + /// + /// assert_eq!(map[&"a"], 1111); + /// assert_eq!(map.len(), 3); + /// + /// // Existing key (take) + /// let hash = compute_hash(map.hasher(), &"c"); + /// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"c") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(view) => { + /// assert_eq!(view.remove_entry(), ("c", 300)); + /// } + /// } + /// assert_eq!(map.raw_entry().from_key(&"c"), None); + /// assert_eq!(map.len(), 2); + /// + /// // Nonexistent key (insert and update) + /// let key = "d"; + /// let hash = compute_hash(map.hasher(), &key); + /// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { + /// RawEntryMut::Occupied(_) => unreachable!(), + /// RawEntryMut::Vacant(view) => { + /// let (k, value) = view.insert("d", 4000); + /// assert_eq!((*k, *value), ("d", 4000)); + /// *value = 40000; + /// } + /// } + /// assert_eq!(map[&"d"], 40000); + /// assert_eq!(map.len(), 3); + /// + /// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(view) => { + /// assert_eq!(view.remove_entry(), ("d", 40000)); + /// } + /// } + /// assert_eq!(map.get(&"d"), None); + /// assert_eq!(map.len(), 2); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn raw_entry_mut(&mut self) -> RawEntryBuilderMut<'_, K, V, S, A> { + RawEntryBuilderMut { map: self } + } + + /// Creates a raw immutable entry builder for the HashMap. + /// + /// Raw entries provide the lowest level of control for searching and + /// manipulating a map. They must be manually initialized with a hash and + /// then manually searched. + /// + /// This is useful for + /// * Hash memoization + /// * Using a search key that doesn't work with the Borrow trait + /// * Using custom comparison logic without newtype wrappers + /// + /// Unless you are in such a situation, higher-level and more foolproof APIs like + /// `get` should be preferred. + /// + /// Immutable raw entries have very limited use; you might instead want `raw_entry_mut`. 
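+ ///
+ /// Each lookup method on the returned builder (`from_key`,
+ /// `from_key_hashed_nocheck`, and `from_hash`) yields `Option<(&K, &V)>`
+ /// rather than an entry object.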
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.extend([("a", 100), ("b", 200), ("c", 300)]);
+ ///
+ /// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// for k in ["a", "b", "c", "d", "e", "f"] {
+ /// let hash = compute_hash(map.hasher(), k);
+ /// let v = map.get(&k).cloned();
+ /// let kv = v.as_ref().map(|v| (&k, v));
+ ///
+ /// println!("Key: {} and value: {:?}", k, v);
+ ///
+ /// assert_eq!(map.raw_entry().from_key(&k), kv);
+ /// assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv);
+ /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv);
+ /// }
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_entry(&self) -> RawEntryBuilder<'_, K, V, S, A> {
+ RawEntryBuilder { map: self }
+ }
+
+ /// Returns a mutable reference to the [`RawTable`] used underneath [`HashMap`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// # Note
+ ///
+ /// Calling this function is safe, but using the raw hash table API may require
+ /// unsafe functions or blocks.
+ ///
+ /// The `RawTable` API gives the lowest level of control under the map that can be useful
+ /// for extending the HashMap's API, but may lead to *[undefined behavior]*.
+ ///
+ /// [`HashMap`]: struct.HashMap.html
+ /// [`RawTable`]: raw/struct.RawTable.html
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::HashMap;
+ ///
+ /// let mut map = HashMap::new();
+ /// map.extend([("a", 10), ("b", 20), ("c", 30)]);
+ /// assert_eq!(map.len(), 3);
+ ///
+ /// // Let's imagine that we have a value and a hash of the key, but not the key itself.
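+ /// // (The raw table stores the map's `(K, V)` pairs directly, so the
+ /// // matching closure passed to `find` receives a reference to the pair.)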
+ /// // However, if you want to remove the value from the map by hash and value, and you
+ /// // know exactly that the value is unique, then you can create a function like this:
+ /// fn remove_by_hash<K, V, S, F>(
+ /// map: &mut HashMap<K, V, S>,
+ /// hash: u64,
+ /// is_match: F,
+ /// ) -> Option<(K, V)>
+ /// where
+ /// F: Fn(&(K, V)) -> bool,
+ /// {
+ /// let raw_table = map.raw_table();
+ /// match raw_table.find(hash, is_match) {
+ /// Some(bucket) => Some(unsafe { raw_table.remove(bucket) }),
+ /// None => None,
+ /// }
+ /// }
+ ///
+ /// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash = compute_hash(map.hasher(), "a");
+ /// assert_eq!(remove_by_hash(&mut map, hash, |(_, v)| *v == 10), Some(("a", 10)));
+ /// assert_eq!(map.get(&"a"), None);
+ /// assert_eq!(map.len(), 2);
+ /// ```
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table(&mut self) -> &mut RawTable<(K, V), A> {
+ &mut self.table
+ }
+}
+
+impl<K, V, S, A> PartialEq for HashMap<K, V, S, A>
+where
+ K: Eq + Hash,
+ V: PartialEq,
+ S: BuildHasher,
+ A: Allocator + Clone,
+{
+ fn eq(&self, other: &Self) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter()
+ .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
+ }
+}
+
+impl<K, V, S, A> Eq for HashMap<K, V, S, A>
+where
+ K: Eq + Hash,
+ V: Eq,
+ S: BuildHasher,
+ A: Allocator + Clone,
+{
+}
+
+impl<K, V, S, A> Debug for HashMap<K, V, S, A>
+where
+ K: Debug,
+ V: Debug,
+ A: Allocator + Clone,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_map().entries(self.iter()).finish()
+ }
+}
+
+impl<K, V, S, A> Default for HashMap<K, V, S, A>
+where
+ S: Default,
+ A: Default + Allocator + Clone,
+{
+ /// Creates an empty `HashMap`, with the `Default` value for the hasher and allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use std::collections::hash_map::RandomState;
+ ///
+ /// // You can specify all types of HashMap, including hasher and allocator.
+ /// // Created map is empty and doesn't allocate memory
+ /// let map: HashMap<u32, String> = Default::default();
+ /// assert_eq!(map.capacity(), 0);
+ /// let map: HashMap<u32, String, RandomState> = HashMap::default();
+ /// assert_eq!(map.capacity(), 0);
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn default() -> Self {
+ Self::with_hasher_in(Default::default(), Default::default())
+ }
+}
+
+impl<K, Q: ?Sized, V, S, A> Index<&Q> for HashMap<K, V, S, A>
+where
+ K: Eq + Hash + Borrow<Q>,
+ Q: Eq + Hash,
+ S: BuildHasher,
+ A: Allocator + Clone,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `HashMap`.
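+ ///
+ /// For a lookup that does not panic on a missing key, use `get`, which
+ /// returns `Option<&V>` instead.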
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let map: HashMap<_, _> = [("a", "One"), ("b", "Two")].into(); + /// + /// assert_eq!(map[&"a"], "One"); + /// assert_eq!(map[&"b"], "Two"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn index(&self, key: &Q) -> &V { + self.get(key).expect("no entry found for key") + } +} + +// The default hasher is used to match the std implementation signature +#[cfg(feature = "ahash")] +impl From<[(K, V); N]> for HashMap +where + K: Eq + Hash, + A: Default + Allocator + Clone, +{ + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let map1 = HashMap::from([(1, 2), (3, 4)]); + /// let map2: HashMap<_, _> = [(1, 2), (3, 4)].into(); + /// assert_eq!(map1, map2); + /// ``` + fn from(arr: [(K, V); N]) -> Self { + arr.into_iter().collect() + } +} + +/// An iterator over the entries of a `HashMap` in arbitrary order. +/// The iterator element type is `(&'a K, &'a V)`. +/// +/// This `struct` is created by the [`iter`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`iter`]: struct.HashMap.html#method.iter +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut iter = map.iter(); +/// let mut vec = vec![iter.next(), iter.next(), iter.next()]; +/// +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((&1, &"a")), Some((&2, &"b")), Some((&3, &"c"))]); +/// +/// // It is fused iterator +/// assert_eq!(iter.next(), None); +/// assert_eq!(iter.next(), None); +/// ``` +pub struct Iter<'a, K, V> { + inner: RawIter<(K, V)>, + marker: PhantomData<(&'a K, &'a V)>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Iter<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Iter { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl fmt::Debug for Iter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A mutable iterator over the entries of a `HashMap` in arbitrary order. +/// The iterator element type is `(&'a K, &'a mut V)`. +/// +/// This `struct` is created by the [`iter_mut`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`iter_mut`]: struct.HashMap.html#method.iter_mut +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap<_, _> = [(1, "One".to_owned()), (2, "Two".into())].into(); +/// +/// let mut iter = map.iter_mut(); +/// iter.next().map(|(_, v)| v.push_str(" Mississippi")); +/// iter.next().map(|(_, v)| v.push_str(" Mississippi")); +/// +/// // It is fused iterator +/// assert_eq!(iter.next(), None); +/// assert_eq!(iter.next(), None); +/// +/// assert_eq!(map.get(&1).unwrap(), &"One Mississippi".to_owned()); +/// assert_eq!(map.get(&2).unwrap(), &"Two Mississippi".to_owned()); +/// ``` +pub struct IterMut<'a, K, V> { + inner: RawIter<(K, V)>, + // To ensure invariance with respect to V + marker: PhantomData<(&'a K, &'a mut V)>, +} + +// We override the default Send impl which has K: Sync instead of K: Send. Both +// are correct, but this one is more general since it allows keys which +// implement Send but not Sync. 
+unsafe impl Send for IterMut<'_, K, V> {} + +impl IterMut<'_, K, V> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +/// An owning iterator over the entries of a `HashMap` in arbitrary order. +/// The iterator element type is `(K, V)`. +/// +/// This `struct` is created by the [`into_iter`] method on [`HashMap`] +/// (provided by the [`IntoIterator`] trait). See its documentation for more. +/// The map cannot be used after calling that method. +/// +/// [`into_iter`]: struct.HashMap.html#method.into_iter +/// [`HashMap`]: struct.HashMap.html +/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut iter = map.into_iter(); +/// let mut vec = vec![iter.next(), iter.next(), iter.next()]; +/// +/// // The `IntoIter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((1, "a")), Some((2, "b")), Some((3, "c"))]); +/// +/// // It is fused iterator +/// assert_eq!(iter.next(), None); +/// assert_eq!(iter.next(), None); +/// ``` +pub struct IntoIter { + inner: RawIntoIter<(K, V), A>, +} + +impl IntoIter { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.iter(), + marker: PhantomData, + } + } +} + +/// An owning iterator over the keys of a `HashMap` in arbitrary order. +/// The iterator element type is `K`. +/// +/// This `struct` is created by the [`into_keys`] method on [`HashMap`]. +/// See its documentation for more. +/// The map cannot be used after calling that method. +/// +/// [`into_keys`]: struct.HashMap.html#method.into_keys +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut keys = map.into_keys(); +/// let mut vec = vec![keys.next(), keys.next(), keys.next()]; +/// +/// // The `IntoKeys` iterator produces keys in arbitrary order, so the +/// // keys must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some(1), Some(2), Some(3)]); +/// +/// // It is fused iterator +/// assert_eq!(keys.next(), None); +/// assert_eq!(keys.next(), None); +/// ``` +pub struct IntoKeys { + inner: IntoIter, +} + +impl Iterator for IntoKeys { + type Item = K; + + #[inline] + fn next(&mut self) -> Option { + self.inner.next().map(|(k, _)| k) + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for IntoKeys { + #[inline] + fn len(&self) -> usize { + self.inner.len() + } +} + +impl FusedIterator for IntoKeys {} + +impl fmt::Debug for IntoKeys { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.inner.iter().map(|(k, _)| k)) + .finish() + } +} + +/// An owning iterator over the values of a `HashMap` in arbitrary order. +/// The iterator element type is `V`. +/// +/// This `struct` is created by the [`into_values`] method on [`HashMap`]. +/// See its documentation for more. 
The map cannot be used after calling that method. +/// +/// [`into_values`]: struct.HashMap.html#method.into_values +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut values = map.into_values(); +/// let mut vec = vec![values.next(), values.next(), values.next()]; +/// +/// // The `IntoValues` iterator produces values in arbitrary order, so +/// // the values must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some("a"), Some("b"), Some("c")]); +/// +/// // It is fused iterator +/// assert_eq!(values.next(), None); +/// assert_eq!(values.next(), None); +/// ``` +pub struct IntoValues { + inner: IntoIter, +} + +impl Iterator for IntoValues { + type Item = V; + + #[inline] + fn next(&mut self) -> Option { + self.inner.next().map(|(_, v)| v) + } + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl ExactSizeIterator for IntoValues { + #[inline] + fn len(&self) -> usize { + self.inner.len() + } +} + +impl FusedIterator for IntoValues {} + +impl fmt::Debug for IntoValues { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list() + .entries(self.inner.iter().map(|(_, v)| v)) + .finish() + } +} + +/// An iterator over the keys of a `HashMap` in arbitrary order. +/// The iterator element type is `&'a K`. +/// +/// This `struct` is created by the [`keys`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`keys`]: struct.HashMap.html#method.keys +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut keys = map.keys(); +/// let mut vec = vec![keys.next(), keys.next(), keys.next()]; +/// +/// // The `Keys` iterator produces keys in arbitrary order, so the +/// // keys must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some(&1), Some(&2), Some(&3)]); +/// +/// // It is fused iterator +/// assert_eq!(keys.next(), None); +/// assert_eq!(keys.next(), None); +/// ``` +pub struct Keys<'a, K, V> { + inner: Iter<'a, K, V>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Keys<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Keys { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for Keys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// An iterator over the values of a `HashMap` in arbitrary order. +/// The iterator element type is `&'a V`. +/// +/// This `struct` is created by the [`values`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`values`]: struct.HashMap.html#method.values +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut values = map.values(); +/// let mut vec = vec![values.next(), values.next(), values.next()]; +/// +/// // The `Values` iterator produces values in arbitrary order, so the +/// // values must be sorted to test them against a sorted array. 
+/// vec.sort_unstable(); +/// assert_eq!(vec, [Some(&"a"), Some(&"b"), Some(&"c")]); +/// +/// // It is fused iterator +/// assert_eq!(values.next(), None); +/// assert_eq!(values.next(), None); +/// ``` +pub struct Values<'a, K, V> { + inner: Iter<'a, K, V>, +} + +// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +impl Clone for Values<'_, K, V> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Values { + inner: self.inner.clone(), + } + } +} + +impl fmt::Debug for Values<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.clone()).finish() + } +} + +/// A draining iterator over the entries of a `HashMap` in arbitrary +/// order. The iterator element type is `(K, V)`. +/// +/// This `struct` is created by the [`drain`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`drain`]: struct.HashMap.html#method.drain +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut drain_iter = map.drain(); +/// let mut vec = vec![drain_iter.next(), drain_iter.next(), drain_iter.next()]; +/// +/// // The `Drain` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((1, "a")), Some((2, "b")), Some((3, "c"))]); +/// +/// // It is fused iterator +/// assert_eq!(drain_iter.next(), None); +/// assert_eq!(drain_iter.next(), None); +/// ``` +pub struct Drain<'a, K, V, A: Allocator + Clone = Global> { + inner: RawDrain<'a, (K, V), A>, +} + +impl Drain<'_, K, V, A> { + /// Returns a iterator of references over the remaining items. + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn iter(&self) -> Iter<'_, K, V> { + Iter { + inner: self.inner.iter(), + marker: PhantomData, + } + } +} + +/// A draining iterator over entries of a `HashMap` which don't satisfy the predicate +/// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`. +/// +/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`drain_filter`]: struct.HashMap.html#method.drain_filter +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap = [(1, "a"), (2, "b"), (3, "c")].into(); +/// +/// let mut drain_filter = map.drain_filter(|k, _v| k % 2 != 0); +/// let mut vec = vec![drain_filter.next(), drain_filter.next()]; +/// +/// // The `DrainFilter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. 
+/// vec.sort_unstable(); +/// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]); +/// +/// // It is fused iterator +/// assert_eq!(drain_filter.next(), None); +/// assert_eq!(drain_filter.next(), None); +/// drop(drain_filter); +/// +/// assert_eq!(map.len(), 1); +/// ``` +pub struct DrainFilter<'a, K, V, F, A: Allocator + Clone = Global> +where + F: FnMut(&K, &mut V) -> bool, +{ + f: F, + inner: DrainFilterInner<'a, K, V, A>, +} + +impl<'a, K, V, F, A> Drop for DrainFilter<'a, K, V, F, A> +where + F: FnMut(&K, &mut V) -> bool, + A: Allocator + Clone, +{ + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + while let Some(item) = self.next() { + let guard = ConsumeAllOnDrop(self); + drop(item); + mem::forget(guard); + } + } +} + +pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T); + +impl Drop for ConsumeAllOnDrop<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + self.0.for_each(drop); + } +} + +impl Iterator for DrainFilter<'_, K, V, F, A> +where + F: FnMut(&K, &mut V) -> bool, + A: Allocator + Clone, +{ + type Item = (K, V); + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + self.inner.next(&mut self.f) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (0, self.inner.iter.size_hint().1) + } +} + +impl FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} + +/// Portions of `DrainFilter` shared with `set::DrainFilter` +pub(super) struct DrainFilterInner<'a, K, V, A: Allocator + Clone> { + pub iter: RawIter<(K, V)>, + pub table: &'a mut RawTable<(K, V), A>, +} + +impl DrainFilterInner<'_, K, V, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(super) fn next(&mut self, f: &mut F) -> Option<(K, V)> + where + F: FnMut(&K, &mut V) -> bool, + { + unsafe { + for item in &mut self.iter { + let &mut (ref key, ref mut value) = item.as_mut(); + if f(key, value) { + return Some(self.table.remove(item)); + } + } + } + None + } +} + +/// A mutable iterator over the values of a `HashMap` in arbitrary order. +/// The iterator element type is `&'a mut V`. +/// +/// This `struct` is created by the [`values_mut`] method on [`HashMap`]. See its +/// documentation for more. +/// +/// [`values_mut`]: struct.HashMap.html#method.values_mut +/// [`HashMap`]: struct.HashMap.html +/// +/// # Examples +/// +/// ``` +/// use hashbrown::HashMap; +/// +/// let mut map: HashMap<_, _> = [(1, "One".to_owned()), (2, "Two".into())].into(); +/// +/// let mut values = map.values_mut(); +/// values.next().map(|v| v.push_str(" Mississippi")); +/// values.next().map(|v| v.push_str(" Mississippi")); +/// +/// // It is fused iterator +/// assert_eq!(values.next(), None); +/// assert_eq!(values.next(), None); +/// +/// assert_eq!(map.get(&1).unwrap(), &"One Mississippi".to_owned()); +/// assert_eq!(map.get(&2).unwrap(), &"Two Mississippi".to_owned()); +/// ``` +pub struct ValuesMut<'a, K, V> { + inner: IterMut<'a, K, V>, +} + +/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. +/// +/// See the [`HashMap::raw_entry_mut`] docs for usage examples. 
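+///
+/// The builder is consumed by one of its lookup methods, `from_key`,
+/// `from_key_hashed_nocheck`, or `from_hash`, each of which returns a
+/// `RawEntryMut`.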
+/// +/// [`HashMap::raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{RawEntryBuilderMut, RawEntryMut::Vacant, RawEntryMut::Occupied}; +/// use hashbrown::HashMap; +/// use core::hash::{BuildHasher, Hash}; +/// +/// let mut map = HashMap::new(); +/// map.extend([(1, 11), (2, 12), (3, 13), (4, 14), (5, 15), (6, 16)]); +/// assert_eq!(map.len(), 6); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// let builder: RawEntryBuilderMut<_, _, _> = map.raw_entry_mut(); +/// +/// // Existing key +/// match builder.from_key(&6) { +/// Vacant(_) => unreachable!(), +/// Occupied(view) => assert_eq!(view.get(), &16), +/// } +/// +/// for key in 0..12 { +/// let hash = compute_hash(map.hasher(), &key); +/// let value = map.get(&key).cloned(); +/// let key_value = value.as_ref().map(|v| (&key, v)); +/// +/// println!("Key: {} and value: {:?}", key, value); +/// +/// match map.raw_entry_mut().from_key(&key) { +/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), +/// Vacant(_) => assert_eq!(value, None), +/// } +/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) { +/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), +/// Vacant(_) => assert_eq!(value, None), +/// } +/// match map.raw_entry_mut().from_hash(hash, |q| *q == key) { +/// Occupied(mut o) => assert_eq!(Some(o.get_key_value()), key_value), +/// Vacant(_) => assert_eq!(value, None), +/// } +/// } +/// +/// assert_eq!(map.len(), 6); +/// ``` +pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { + map: &'a mut HashMap, +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This is a lower-level version of [`Entry`]. +/// +/// This `enum` is constructed through the [`raw_entry_mut`] method on [`HashMap`], +/// then calling one of the methods of that [`RawEntryBuilderMut`]. 
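+///
+/// Unlike `Entry`, a raw entry can be located directly from a hash and a
+/// matching closure, and a vacant raw entry holds no owned key until one is
+/// supplied at insertion time.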
+/// +/// [`HashMap`]: struct.HashMap.html +/// [`Entry`]: enum.Entry.html +/// [`raw_entry_mut`]: struct.HashMap.html#method.raw_entry_mut +/// [`RawEntryBuilderMut`]: struct.RawEntryBuilderMut.html +/// +/// # Examples +/// +/// ``` +/// use core::hash::{BuildHasher, Hash}; +/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawOccupiedEntryMut}; +/// +/// let mut map = HashMap::new(); +/// map.extend([('a', 1), ('b', 2), ('c', 3)]); +/// assert_eq!(map.len(), 3); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// // Existing key (insert) +/// let raw: RawEntryMut<_, _, _> = map.raw_entry_mut().from_key(&'a'); +/// let _raw_o: RawOccupiedEntryMut<_, _, _> = raw.insert('a', 10); +/// assert_eq!(map.len(), 3); +/// +/// // Nonexistent key (insert) +/// map.raw_entry_mut().from_key(&'d').insert('d', 40); +/// assert_eq!(map.len(), 4); +/// +/// // Existing key (or_insert) +/// let hash = compute_hash(map.hasher(), &'b'); +/// let kv = map +/// .raw_entry_mut() +/// .from_key_hashed_nocheck(hash, &'b') +/// .or_insert('b', 20); +/// assert_eq!(kv, (&mut 'b', &mut 2)); +/// *kv.1 = 20; +/// assert_eq!(map.len(), 4); +/// +/// // Nonexistent key (or_insert) +/// let hash = compute_hash(map.hasher(), &'e'); +/// let kv = map +/// .raw_entry_mut() +/// .from_key_hashed_nocheck(hash, &'e') +/// .or_insert('e', 50); +/// assert_eq!(kv, (&mut 'e', &mut 50)); +/// assert_eq!(map.len(), 5); +/// +/// // Existing key (or_insert_with) +/// let hash = compute_hash(map.hasher(), &'c'); +/// let kv = map +/// .raw_entry_mut() +/// .from_hash(hash, |q| q == &'c') +/// .or_insert_with(|| ('c', 30)); +/// assert_eq!(kv, (&mut 'c', &mut 3)); +/// *kv.1 = 30; +/// assert_eq!(map.len(), 5); +/// +/// // Nonexistent key (or_insert_with) +/// let hash = compute_hash(map.hasher(), &'f'); +/// let kv = map +/// .raw_entry_mut() +/// .from_hash(hash, |q| q == &'f') +/// .or_insert_with(|| ('f', 60)); +/// assert_eq!(kv, (&mut 'f', &mut 60)); +/// assert_eq!(map.len(), 6); +/// +/// println!("Our HashMap: {:?}", map); +/// +/// let mut vec: Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect(); +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); +/// ``` +pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::{hash_map::RawEntryMut, HashMap}; + /// let mut map: HashMap<_, _> = [("a", 100), ("b", 200)].into(); + /// + /// match map.raw_entry_mut().from_key(&"a") { + /// RawEntryMut::Vacant(_) => unreachable!(), + /// RawEntryMut::Occupied(_) => { } + /// } + /// ``` + Occupied(RawOccupiedEntryMut<'a, K, V, S, A>), + /// A vacant entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::{hash_map::RawEntryMut, HashMap}; + /// let mut map: HashMap<&str, i32> = HashMap::new(); + /// + /// match map.raw_entry_mut().from_key("a") { + /// RawEntryMut::Occupied(_) => unreachable!(), + /// RawEntryMut::Vacant(_) => { } + /// } + /// ``` + Vacant(RawVacantEntryMut<'a, K, V, S, A>), +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`RawEntryMut`] enum. 
+/// +/// [`RawEntryMut`]: enum.RawEntryMut.html +/// +/// # Examples +/// +/// ``` +/// use core::hash::{BuildHasher, Hash}; +/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawOccupiedEntryMut}; +/// +/// let mut map = HashMap::new(); +/// map.extend([("a", 10), ("b", 20), ("c", 30)]); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// let _raw_o: RawOccupiedEntryMut<_, _, _> = map.raw_entry_mut().from_key(&"a").insert("a", 100); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (insert and update) +/// match map.raw_entry_mut().from_key(&"a") { +/// RawEntryMut::Vacant(_) => unreachable!(), +/// RawEntryMut::Occupied(mut view) => { +/// assert_eq!(view.get(), &100); +/// let v = view.get_mut(); +/// let new_v = (*v) * 10; +/// *v = new_v; +/// assert_eq!(view.insert(1111), 1000); +/// } +/// } +/// +/// assert_eq!(map[&"a"], 1111); +/// assert_eq!(map.len(), 3); +/// +/// // Existing key (take) +/// let hash = compute_hash(map.hasher(), &"c"); +/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"c") { +/// RawEntryMut::Vacant(_) => unreachable!(), +/// RawEntryMut::Occupied(view) => { +/// assert_eq!(view.remove_entry(), ("c", 30)); +/// } +/// } +/// assert_eq!(map.raw_entry().from_key(&"c"), None); +/// assert_eq!(map.len(), 2); +/// +/// let hash = compute_hash(map.hasher(), &"b"); +/// match map.raw_entry_mut().from_hash(hash, |q| *q == "b") { +/// RawEntryMut::Vacant(_) => unreachable!(), +/// RawEntryMut::Occupied(view) => { +/// assert_eq!(view.remove_entry(), ("b", 20)); +/// } +/// } +/// assert_eq!(map.get(&"b"), None); +/// assert_eq!(map.len(), 1); +/// ``` +pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { + elem: Bucket<(K, V)>, + table: &'a mut RawTable<(K, V), A>, + hash_builder: &'a S, +} + +unsafe impl Send for RawOccupiedEntryMut<'_, K, V, S, A> +where + K: Send, + V: Send, + S: Send, + A: Send + Allocator + Clone, +{ +} +unsafe impl Sync for RawOccupiedEntryMut<'_, K, V, S, A> +where + K: Sync, + V: Sync, + S: Sync, + A: Sync + Allocator + Clone, +{ +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`RawEntryMut`] enum. 
+/// +/// [`RawEntryMut`]: enum.RawEntryMut.html +/// +/// # Examples +/// +/// ``` +/// use core::hash::{BuildHasher, Hash}; +/// use hashbrown::hash_map::{HashMap, RawEntryMut, RawVacantEntryMut}; +/// +/// let mut map = HashMap::<&str, i32>::new(); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// let raw_v: RawVacantEntryMut<_, _, _> = match map.raw_entry_mut().from_key(&"a") { +/// RawEntryMut::Vacant(view) => view, +/// RawEntryMut::Occupied(_) => unreachable!(), +/// }; +/// raw_v.insert("a", 10); +/// assert!(map[&"a"] == 10 && map.len() == 1); +/// +/// // Nonexistent key (insert and update) +/// let hash = compute_hash(map.hasher(), &"b"); +/// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &"b") { +/// RawEntryMut::Occupied(_) => unreachable!(), +/// RawEntryMut::Vacant(view) => { +/// let (k, value) = view.insert("b", 2); +/// assert_eq!((*k, *value), ("b", 2)); +/// *value = 20; +/// } +/// } +/// assert!(map[&"b"] == 20 && map.len() == 2); +/// +/// let hash = compute_hash(map.hasher(), &"c"); +/// match map.raw_entry_mut().from_hash(hash, |q| *q == "c") { +/// RawEntryMut::Occupied(_) => unreachable!(), +/// RawEntryMut::Vacant(view) => { +/// assert_eq!(view.insert("c", 30), (&mut "c", &mut 30)); +/// } +/// } +/// assert!(map[&"c"] == 30 && map.len() == 3); +/// ``` +pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { + table: &'a mut RawTable<(K, V), A>, + hash_builder: &'a S, +} + +/// A builder for computing where in a [`HashMap`] a key-value pair would be stored. +/// +/// See the [`HashMap::raw_entry`] docs for usage examples. +/// +/// [`HashMap::raw_entry`]: struct.HashMap.html#method.raw_entry +/// +/// # Examples +/// +/// ``` +/// use hashbrown::hash_map::{HashMap, RawEntryBuilder}; +/// use core::hash::{BuildHasher, Hash}; +/// +/// let mut map = HashMap::new(); +/// map.extend([(1, 10), (2, 20), (3, 30)]); +/// +/// fn compute_hash(hash_builder: &S, key: &K) -> u64 { +/// use core::hash::Hasher; +/// let mut state = hash_builder.build_hasher(); +/// key.hash(&mut state); +/// state.finish() +/// } +/// +/// for k in 0..6 { +/// let hash = compute_hash(map.hasher(), &k); +/// let v = map.get(&k).cloned(); +/// let kv = v.as_ref().map(|v| (&k, v)); +/// +/// println!("Key: {} and value: {:?}", k, v); +/// let builder: RawEntryBuilder<_, _, _> = map.raw_entry(); +/// assert_eq!(builder.from_key(&k), kv); +/// assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); +/// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); +/// } +/// ``` +pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> { + map: &'a HashMap, +} + +impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { + /// Creates a `RawEntryMut` from the given key. 
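+ ///
+ /// The key is hashed with the map's hasher and the entry is located by
+ /// equality; this is shorthand for calling `from_key_hashed_nocheck` with
+ /// that hash.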
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_key(&key); + /// entry.insert(key, 100); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_key(self, k: &Q) -> RawEntryMut<'a, K, V, S, A> + where + S: BuildHasher, + K: Borrow, + Q: Hash + Eq, + { + let hash = make_hash::(&self.map.hash_builder, k); + self.from_key_hashed_nocheck(hash, k) + } + + /// Creates a `RawEntryMut` from the given key and its hash. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let hash = compute_hash(map.hasher(), &key); + /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_key_hashed_nocheck(hash, &key); + /// entry.insert(key, 100); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[inline] + #[allow(clippy::wrong_self_convention)] + pub fn from_key_hashed_nocheck(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A> + where + K: Borrow, + Q: Eq, + { + self.from_hash(hash, equivalent(k)) + } +} + +impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { + /// Creates a `RawEntryMut` from the given hash and matching function. + /// + /// # Examples + /// + /// ``` + /// use core::hash::{BuildHasher, Hash}; + /// use hashbrown::hash_map::{HashMap, RawEntryMut}; + /// + /// fn compute_hash(hash_builder: &S, key: &K) -> u64 { + /// use core::hash::Hasher; + /// let mut state = hash_builder.build_hasher(); + /// key.hash(&mut state); + /// state.finish() + /// } + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let key = "a"; + /// let hash = compute_hash(map.hasher(), &key); + /// let entry: RawEntryMut<&str, u32, _> = map.raw_entry_mut().from_hash(hash, |k| k == &key); + /// entry.insert(key, 100); + /// assert_eq!(map[&"a"], 100); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::wrong_self_convention)] + pub fn from_hash(self, hash: u64, is_match: F) -> RawEntryMut<'a, K, V, S, A> + where + for<'b> F: FnMut(&'b K) -> bool, + { + self.search(hash, is_match) + } + + #[cfg_attr(feature = "inline-more", inline)] + fn search(self, hash: u64, mut is_match: F) -> RawEntryMut<'a, K, V, S, A> + where + for<'b> F: FnMut(&'b K) -> bool, + { + match self.map.table.find(hash, |(k, _)| is_match(k)) { + Some(elem) => RawEntryMut::Occupied(RawOccupiedEntryMut { + elem, + table: &mut self.map.table, + hash_builder: &self.map.hash_builder, + }), + None => RawEntryMut::Vacant(RawVacantEntryMut { + table: &mut self.map.table, + hash_builder: &self.map.hash_builder, + }), + } + } +} + +impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { + /// Access an immutable entry by key. 
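+ ///
+ /// Returns `None` if no entry with an equal key is present.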
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    /// let key = "a";
+    /// assert_eq!(map.raw_entry().from_key(&key), Some((&"a", &100)));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    #[allow(clippy::wrong_self_convention)]
+    pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
+    where
+        S: BuildHasher,
+        K: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        let hash = make_hash::<K, Q, S>(&self.map.hash_builder, k);
+        self.from_key_hashed_nocheck(hash, k)
+    }
+
+    /// Access an immutable entry by a key and its hash.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use core::hash::{BuildHasher, Hash};
+    /// use hashbrown::HashMap;
+    ///
+    /// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+    ///     use core::hash::Hasher;
+    ///     let mut state = hash_builder.build_hasher();
+    ///     key.hash(&mut state);
+    ///     state.finish()
+    /// }
+    ///
+    /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    /// let key = "a";
+    /// let hash = compute_hash(map.hasher(), &key);
+    /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &key), Some((&"a", &100)));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    #[allow(clippy::wrong_self_convention)]
+    pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
+    where
+        K: Borrow<Q>,
+        Q: Eq,
+    {
+        self.from_hash(hash, equivalent(k))
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn search<F>(self, hash: u64, mut is_match: F) -> Option<(&'a K, &'a V)>
+    where
+        F: FnMut(&K) -> bool,
+    {
+        match self.map.table.get(hash, |(k, _)| is_match(k)) {
+            Some(&(ref key, ref value)) => Some((key, value)),
+            None => None,
+        }
+    }
+
+    /// Access an immutable entry by hash and matching function.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use core::hash::{BuildHasher, Hash};
+    /// use hashbrown::HashMap;
+    ///
+    /// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+    ///     use core::hash::Hasher;
+    ///     let mut state = hash_builder.build_hasher();
+    ///     key.hash(&mut state);
+    ///     state.finish()
+    /// }
+    ///
+    /// let map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    /// let key = "a";
+    /// let hash = compute_hash(map.hasher(), &key);
+    /// assert_eq!(map.raw_entry().from_hash(hash, |k| k == &key), Some((&"a", &100)));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    #[allow(clippy::wrong_self_convention)]
+    pub fn from_hash<F>(self, hash: u64, is_match: F) -> Option<(&'a K, &'a V)>
+    where
+        F: FnMut(&K) -> bool,
+    {
+        self.search(hash, is_match)
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> {
+    /// Sets the value of the entry, and returns a RawOccupiedEntryMut.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// let entry = map.raw_entry_mut().from_key("horseyland").insert("horseyland", 37);
+    ///
+    /// assert_eq!(entry.remove_entry(), ("horseyland", 37));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S, A>
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            RawEntryMut::Occupied(mut entry) => {
+                entry.insert(value);
+                entry
+            }
+            RawEntryMut::Vacant(entry) => entry.insert_entry(key, value),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the default if empty, and returns
+    /// mutable references to the key and value in the entry.
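+    ///
+    /// The provided `default_key` and `default_val` are moved into the map only when
+    /// the entry is vacant; for an occupied entry they are dropped unused.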
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 3);
+    /// assert_eq!(map["poneyland"], 3);
+    ///
+    /// *map.raw_entry_mut().from_key("poneyland").or_insert("poneyland", 10).1 *= 2;
+    /// assert_eq!(map["poneyland"], 6);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert(self, default_key: K, default_val: V) -> (&'a mut K, &'a mut V)
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            RawEntryMut::Occupied(entry) => entry.into_key_value(),
+            RawEntryMut::Vacant(entry) => entry.insert(default_key, default_val),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the result of the default function if empty,
+    /// and returns mutable references to the key and value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, String> = HashMap::new();
+    ///
+    /// map.raw_entry_mut().from_key("poneyland").or_insert_with(|| {
+    ///     ("poneyland", "hoho".to_string())
+    /// });
+    ///
+    /// assert_eq!(map["poneyland"], "hoho".to_string());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert_with<F>(self, default: F) -> (&'a mut K, &'a mut V)
+    where
+        F: FnOnce() -> (K, V),
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            RawEntryMut::Occupied(entry) => entry.into_key_value(),
+            RawEntryMut::Vacant(entry) => {
+                let (k, v) = default();
+                entry.insert(k, v)
+            }
+        }
+    }
+
+    /// Provides in-place mutable access to an occupied entry before any
+    /// potential inserts into the map.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// map.raw_entry_mut()
+    ///    .from_key("poneyland")
+    ///    .and_modify(|_k, v| { *v += 1 })
+    ///    .or_insert("poneyland", 42);
+    /// assert_eq!(map["poneyland"], 42);
+    ///
+    /// map.raw_entry_mut()
+    ///    .from_key("poneyland")
+    ///    .and_modify(|_k, v| { *v += 1 })
+    ///    .or_insert("poneyland", 0);
+    /// assert_eq!(map["poneyland"], 43);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn and_modify<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&mut K, &mut V),
+    {
+        match self {
+            RawEntryMut::Occupied(mut entry) => {
+                {
+                    let (k, v) = entry.get_key_value_mut();
+                    f(k, v);
+                }
+                RawEntryMut::Occupied(entry)
+            }
+            RawEntryMut::Vacant(entry) => RawEntryMut::Vacant(entry),
+        }
+    }
+
+    /// Provides shared access to the key and owned access to the value of
+    /// an occupied entry and allows replacing or removing it based on the
+    /// value of the returned option.
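+    ///
+    /// Returning `Some(value)` from the closure keeps the entry with that value, while
+    /// returning `None` removes the entry from the map.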
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::RawEntryMut;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// let entry = map
+    ///     .raw_entry_mut()
+    ///     .from_key("poneyland")
+    ///     .and_replace_entry_with(|_k, _v| panic!());
+    ///
+    /// match entry {
+    ///     RawEntryMut::Vacant(_) => {},
+    ///     RawEntryMut::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// map.insert("poneyland", 42);
+    ///
+    /// let entry = map
+    ///     .raw_entry_mut()
+    ///     .from_key("poneyland")
+    ///     .and_replace_entry_with(|k, v| {
+    ///         assert_eq!(k, &"poneyland");
+    ///         assert_eq!(v, 42);
+    ///         Some(v + 1)
+    ///     });
+    ///
+    /// match entry {
+    ///     RawEntryMut::Occupied(e) => {
+    ///         assert_eq!(e.key(), &"poneyland");
+    ///         assert_eq!(e.get(), &43);
+    ///     },
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 43);
+    ///
+    /// let entry = map
+    ///     .raw_entry_mut()
+    ///     .from_key("poneyland")
+    ///     .and_replace_entry_with(|_k, _v| None);
+    ///
+    /// match entry {
+    ///     RawEntryMut::Vacant(_) => {},
+    ///     RawEntryMut::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// assert!(!map.contains_key("poneyland"));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn and_replace_entry_with<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&K, V) -> Option<V>,
+    {
+        match self {
+            RawEntryMut::Occupied(entry) => entry.replace_entry_with(f),
+            RawEntryMut::Vacant(_) => self,
+        }
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
+    /// Gets a reference to the key in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => assert_eq!(o.key(), &"a")
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &K {
+        unsafe { &self.elem.as_ref().0 }
+    }
+
+    /// Gets a mutable reference to the key in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    /// use std::rc::Rc;
+    ///
+    /// let key_one = Rc::new("a");
+    /// let key_two = Rc::new("a");
+    ///
+    /// let mut map: HashMap<Rc<&str>, u32> = HashMap::new();
+    /// map.insert(key_one.clone(), 10);
+    ///
+    /// assert_eq!(map[&key_one], 10);
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// match map.raw_entry_mut().from_key(&key_one) {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(mut o) => {
+    ///         *o.key_mut() = key_two.clone();
+    ///     }
+    /// }
+    /// assert_eq!(map[&key_two], 10);
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key_mut(&mut self) -> &mut K {
+        unsafe { &mut self.elem.as_mut().0 }
+    }
+
+    /// Converts the entry into a mutable reference to the key in the entry
+    /// with a lifetime bound to the map itself.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    /// use std::rc::Rc;
+    ///
+    /// let key_one = Rc::new("a");
+    /// let key_two = Rc::new("a");
+    ///
+    /// let mut map: HashMap<Rc<&str>, u32> = HashMap::new();
+    /// map.insert(key_one.clone(), 10);
+    ///
+    /// assert_eq!(map[&key_one], 10);
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// let inside_key: &mut Rc<&str>;
+    ///
+    /// match map.raw_entry_mut().from_key(&key_one) {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => inside_key = o.into_key(),
+    /// }
+    /// *inside_key = key_two.clone();
+    ///
+    /// assert_eq!(map[&key_two], 10);
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_key(self) -> &'a mut K {
+        unsafe { &mut self.elem.as_mut().0 }
+    }
+
+    /// Gets a reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => assert_eq!(o.get(), &100),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get(&self) -> &V {
+        unsafe { &self.elem.as_ref().1 }
+    }
+
+    /// Converts the OccupiedEntry into a mutable reference to the value in the entry
+    /// with a lifetime bound to the map itself.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// let value: &mut u32;
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => value = o.into_mut(),
+    /// }
+    /// *value += 900;
+    ///
+    /// assert_eq!(map[&"a"], 1000);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_mut(self) -> &'a mut V {
+        unsafe { &mut self.elem.as_mut().1 }
+    }
+
+    /// Gets a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(mut o) => *o.get_mut() += 900,
+    /// }
+    ///
+    /// assert_eq!(map[&"a"], 1000);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_mut(&mut self) -> &mut V {
+        unsafe { &mut self.elem.as_mut().1 }
+    }
+
+    /// Gets a reference to the key and value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => assert_eq!(o.get_key_value(), (&"a", &100)),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_key_value(&self) -> (&K, &V) {
+        unsafe {
+            let &(ref key, ref value) = self.elem.as_ref();
+            (key, value)
+        }
+    }
+
+    /// Gets a mutable reference to the key and value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    /// use std::rc::Rc;
+    ///
+    /// let key_one = Rc::new("a");
+    /// let key_two = Rc::new("a");
+    ///
+    /// let mut map: HashMap<Rc<&str>, u32> = HashMap::new();
+    /// map.insert(key_one.clone(), 10);
+    ///
+    /// assert_eq!(map[&key_one], 10);
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// match map.raw_entry_mut().from_key(&key_one) {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(mut o) => {
+    ///         let (inside_key, inside_value) = o.get_key_value_mut();
+    ///         *inside_key = key_two.clone();
+    ///         *inside_value = 100;
+    ///     }
+    /// }
+    /// assert_eq!(map[&key_two], 100);
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_key_value_mut(&mut self) -> (&mut K, &mut V) {
+        unsafe {
+            let &mut (ref mut key, ref mut value) = self.elem.as_mut();
+            (key, value)
+        }
+    }
+
+    /// Converts the OccupiedEntry into a mutable reference to the key and value in the entry
+    /// with a lifetime bound to the map itself.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    /// use std::rc::Rc;
+    ///
+    /// let key_one = Rc::new("a");
+    /// let key_two = Rc::new("a");
+    ///
+    /// let mut map: HashMap<Rc<&str>, u32> = HashMap::new();
+    /// map.insert(key_one.clone(), 10);
+    ///
+    /// assert_eq!(map[&key_one], 10);
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// let inside_key: &mut Rc<&str>;
+    /// let inside_value: &mut u32;
+    /// match map.raw_entry_mut().from_key(&key_one) {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => {
+    ///         let tuple = o.into_key_value();
+    ///         inside_key = tuple.0;
+    ///         inside_value = tuple.1;
+    ///     }
+    /// }
+    /// *inside_key = key_two.clone();
+    /// *inside_value = 100;
+    /// assert_eq!(map[&key_two], 100);
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_key_value(self) -> (&'a mut K, &'a mut V) {
+        unsafe {
+            let &mut (ref mut key, ref mut value) = self.elem.as_mut();
+            (key, value)
+        }
+    }
+
+    /// Sets the value of the entry, and returns the entry's old value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(mut o) => assert_eq!(o.insert(1000), 100),
+    /// }
+    ///
+    /// assert_eq!(map[&"a"], 1000);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(&mut self, value: V) -> V {
+        mem::replace(self.get_mut(), value)
+    }
+
+    /// Sets the key of the entry, and returns the entry's old key.
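+    ///
+    /// The new key must hash and compare equal to the old key: the entry's position in
+    /// the table is not recomputed, so a non-equivalent key would make the entry
+    /// unreachable through normal lookups.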
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    /// use std::rc::Rc;
+    ///
+    /// let key_one = Rc::new("a");
+    /// let key_two = Rc::new("a");
+    ///
+    /// let mut map: HashMap<Rc<&str>, u32> = HashMap::new();
+    /// map.insert(key_one.clone(), 10);
+    ///
+    /// assert_eq!(map[&key_one], 10);
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// match map.raw_entry_mut().from_key(&key_one) {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(mut o) => {
+    ///         let old_key = o.insert_key(key_two.clone());
+    ///         assert!(Rc::ptr_eq(&old_key, &key_one));
+    ///     }
+    /// }
+    /// assert_eq!(map[&key_two], 10);
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert_key(&mut self, key: K) -> K {
+        mem::replace(self.key_mut(), key)
+    }
+
+    /// Takes the value out of the entry, and returns it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => assert_eq!(o.remove(), 100),
+    /// }
+    /// assert_eq!(map.get(&"a"), None);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove(self) -> V {
+        self.remove_entry().1
+    }
+
+    /// Takes ownership of the key and value from the map.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => assert_eq!(o.remove_entry(), ("a", 100)),
+    /// }
+    /// assert_eq!(map.get(&"a"), None);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove_entry(self) -> (K, V) {
+        unsafe { self.table.remove(self.elem) }
+    }
+
+    /// Provides shared access to the key and owned access to the value of
+    /// the entry and allows replacing or removing it based on the
+    /// value of the returned option.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// let raw_entry = match map.raw_entry_mut().from_key(&"a") {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => o.replace_entry_with(|k, v| {
+    ///         assert_eq!(k, &"a");
+    ///         assert_eq!(v, 100);
+    ///         Some(v + 900)
+    ///     }),
+    /// };
+    /// let raw_entry = match raw_entry {
+    ///     RawEntryMut::Vacant(_) => panic!(),
+    ///     RawEntryMut::Occupied(o) => o.replace_entry_with(|k, v| {
+    ///         assert_eq!(k, &"a");
+    ///         assert_eq!(v, 1000);
+    ///         None
+    ///     }),
+    /// };
+    /// match raw_entry {
+    ///     RawEntryMut::Vacant(_) => { },
+    ///     RawEntryMut::Occupied(_) => panic!(),
+    /// };
+    /// assert_eq!(map.get(&"a"), None);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_entry_with<F>(self, f: F) -> RawEntryMut<'a, K, V, S, A>
+    where
+        F: FnOnce(&K, V) -> Option<V>,
+    {
+        unsafe {
+            let still_occupied = self
+                .table
+                .replace_bucket_with(self.elem.clone(), |(key, value)| {
+                    f(&key, value).map(|new_value| (key, new_value))
+                });
+
+            if still_occupied {
+                RawEntryMut::Occupied(self)
+            } else {
+                RawEntryMut::Vacant(RawVacantEntryMut {
+                    table: self.table,
+                    hash_builder: self.hash_builder,
+                })
+            }
+        }
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
+    /// Sets the value of the entry with the VacantEntry's key,
+    /// and returns a mutable reference to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.raw_entry_mut().from_key(&"c") {
+    ///     RawEntryMut::Occupied(_) => panic!(),
+    ///     RawEntryMut::Vacant(v) => assert_eq!(v.insert("c", 300), (&mut "c", &mut 300)),
+    /// }
+    ///
+    /// assert_eq!(map[&"c"], 300);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, key: K, value: V) -> (&'a mut K, &'a mut V)
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        let hash = make_insert_hash::<K, S>(self.hash_builder, &key);
+        self.insert_hashed_nocheck(hash, key, value)
+    }
+
+    /// Sets the value of the entry with the VacantEntry's key,
+    /// and returns a mutable reference to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use core::hash::{BuildHasher, Hash};
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// fn compute_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+    ///     use core::hash::Hasher;
+    ///     let mut state = hash_builder.build_hasher();
+    ///     key.hash(&mut state);
+    ///     state.finish()
+    /// }
+    ///
+    /// let mut map: HashMap<&str, u32> = [("a", 100), ("b", 200)].into();
+    /// let key = "c";
+    /// let hash = compute_hash(map.hasher(), &key);
+    ///
+    /// match map.raw_entry_mut().from_key_hashed_nocheck(hash, &key) {
+    ///     RawEntryMut::Occupied(_) => panic!(),
+    ///     RawEntryMut::Vacant(v) => assert_eq!(
+    ///         v.insert_hashed_nocheck(hash, key, 300),
+    ///         (&mut "c", &mut 300)
+    ///     ),
+    /// }
+    ///
+    /// assert_eq!(map[&"c"], 300);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    #[allow(clippy::shadow_unrelated)]
+    pub fn insert_hashed_nocheck(self, hash: u64, key: K, value: V) -> (&'a mut K, &'a mut V)
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        let &mut (ref mut k, ref mut v) = self.table.insert_entry(
+            hash,
+            (key, value),
+            make_hasher::<K, _, V, S>(self.hash_builder),
+        );
+        (k, v)
+    }
+
+    /// Sets the value of an entry with a custom hasher function.
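+    ///
+    /// The `hasher` function must be consistent with the supplied `hash`: it is used to
+    /// re-hash the existing keys if the table has to grow to accommodate this insert.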
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use core::hash::{BuildHasher, Hash};
+    /// use hashbrown::hash_map::{HashMap, RawEntryMut};
+    ///
+    /// fn make_hasher<K, S>(hash_builder: &S) -> impl Fn(&K) -> u64 + '_
+    /// where
+    ///     K: Hash + ?Sized,
+    ///     S: BuildHasher,
+    /// {
+    ///     move |key: &K| {
+    ///         use core::hash::Hasher;
+    ///         let mut state = hash_builder.build_hasher();
+    ///         key.hash(&mut state);
+    ///         state.finish()
+    ///     }
+    /// }
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// let key = "a";
+    /// let hash_builder = map.hasher().clone();
+    /// let hash = make_hasher(&hash_builder)(&key);
+    ///
+    /// match map.raw_entry_mut().from_hash(hash, |q| q == &key) {
+    ///     RawEntryMut::Occupied(_) => panic!(),
+    ///     RawEntryMut::Vacant(v) => assert_eq!(
+    ///         v.insert_with_hasher(hash, key, 100, make_hasher(&hash_builder)),
+    ///         (&mut "a", &mut 100)
+    ///     ),
+    /// }
+    /// map.extend([("b", 200), ("c", 300), ("d", 400), ("e", 500), ("f", 600)]);
+    /// assert_eq!(map[&"a"], 100);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert_with_hasher<H>(
+        self,
+        hash: u64,
+        key: K,
+        value: V,
+        hasher: H,
+    ) -> (&'a mut K, &'a mut V)
+    where
+        H: Fn(&K) -> u64,
+    {
+        let &mut (ref mut k, ref mut v) = self
+            .table
+            .insert_entry(hash, (key, value), |x| hasher(&x.0));
+        (k, v)
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn insert_entry(self, key: K, value: V) -> RawOccupiedEntryMut<'a, K, V, S, A>
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        let hash = make_insert_hash::<K, S>(self.hash_builder, &key);
+        let elem = self.table.insert(
+            hash,
+            (key, value),
+            make_hasher::<K, _, V, S>(self.hash_builder),
+        );
+        RawOccupiedEntryMut {
+            elem,
+            table: self.table,
+            hash_builder: self.hash_builder,
+        }
+    }
+}
+
+impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilderMut<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("RawEntryBuilder").finish()
+    }
+}
+
+impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawEntryMut<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(),
+            RawEntryMut::Occupied(ref o) => f.debug_tuple("RawEntry").field(o).finish(),
+        }
+    }
+}
+
+impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawOccupiedEntryMut<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("RawOccupiedEntryMut")
+            .field("key", self.key())
+            .field("value", self.get())
+            .finish()
+    }
+}
+
+impl<K, V, S, A: Allocator + Clone> Debug for RawVacantEntryMut<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("RawVacantEntryMut").finish()
+    }
+}
+
+impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilder<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("RawEntryBuilder").finish()
+    }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
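+///
+/// Working through an `Entry` performs the hash lookup only once, so
+/// check-then-insert patterns avoid a second search of the table.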
+///
+/// [`HashMap`]: struct.HashMap.html
+/// [`entry`]: struct.HashMap.html#method.entry
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry};
+///
+/// let mut map = HashMap::new();
+/// map.extend([("a", 10), ("b", 20), ("c", 30)]);
+/// assert_eq!(map.len(), 3);
+///
+/// // Existing key (insert)
+/// let entry: Entry<_, _, _> = map.entry("a");
+/// let _raw_o: OccupiedEntry<_, _, _> = entry.insert(1);
+/// assert_eq!(map.len(), 3);
+/// // Nonexistent key (insert)
+/// map.entry("d").insert(4);
+///
+/// // Existing key (or_insert)
+/// let v = map.entry("b").or_insert(2);
+/// assert_eq!(std::mem::replace(v, 2), 20);
+/// // Nonexistent key (or_insert)
+/// map.entry("e").or_insert(5);
+///
+/// // Existing key (or_insert_with)
+/// let v = map.entry("c").or_insert_with(|| 3);
+/// assert_eq!(std::mem::replace(v, 3), 30);
+/// // Nonexistent key (or_insert_with)
+/// map.entry("f").or_insert_with(|| 6);
+///
+/// println!("Our HashMap: {:?}", map);
+///
+/// let mut vec: Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect();
+/// // The `Iter` iterator produces items in arbitrary order, so the
+/// // items must be sorted to test them against a sorted array.
+/// vec.sort_unstable();
+/// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3), ("d", 4), ("e", 5), ("f", 6)]);
+/// ```
+pub enum Entry<'a, K, V, S, A = Global>
+where
+    A: Allocator + Clone,
+{
+    /// An occupied entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    /// let mut map: HashMap<_, _> = [("a", 100), ("b", 200)].into();
+    ///
+    /// match map.entry("a") {
+    ///     Entry::Vacant(_) => unreachable!(),
+    ///     Entry::Occupied(_) => { }
+    /// }
+    /// ```
+    Occupied(OccupiedEntry<'a, K, V, S, A>),
+
+    /// A vacant entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    /// let mut map: HashMap<&str, i32> = HashMap::new();
+    ///
+    /// match map.entry("a") {
+    ///     Entry::Occupied(_) => unreachable!(),
+    ///     Entry::Vacant(_) => { }
+    /// }
+    /// ```
+    Vacant(VacantEntry<'a, K, V, S, A>),
+}
+
+impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for Entry<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+            Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+        }
+    }
+}
+
+/// A view into an occupied entry in a `HashMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry};
+///
+/// let mut map = HashMap::new();
+/// map.extend([("a", 10), ("b", 20), ("c", 30)]);
+///
+/// let _entry_o: OccupiedEntry<_, _, _> = map.entry("a").insert(100);
+/// assert_eq!(map.len(), 3);
+///
+/// // Existing key (insert and update)
+/// match map.entry("a") {
+///     Entry::Vacant(_) => unreachable!(),
+///     Entry::Occupied(mut view) => {
+///         assert_eq!(view.get(), &100);
+///         let v = view.get_mut();
+///         *v *= 10;
+///         assert_eq!(view.insert(1111), 1000);
+///     }
+/// }
+///
+/// assert_eq!(map[&"a"], 1111);
+/// assert_eq!(map.len(), 3);
+///
+/// // Existing key (take)
+/// match map.entry("c") {
+///     Entry::Vacant(_) => unreachable!(),
+///     Entry::Occupied(view) => {
+///         assert_eq!(view.remove_entry(), ("c", 30));
+///     }
+/// }
+/// assert_eq!(map.get(&"c"), None);
+/// assert_eq!(map.len(), 2);
+/// ```
+pub struct OccupiedEntry<'a, K, V, S, A: Allocator + Clone = Global> {
+    hash: u64,
+    key: Option<K>,
+    elem: Bucket<(K, V)>,
+    table: &'a mut HashMap<K, V, S, A>,
+}
+
+unsafe impl<K, V, S, A> Send for OccupiedEntry<'_, K, V, S, A>
+where
+    K: Send,
+    V: Send,
+    S: Send,
+    A: Send + Allocator + Clone,
+{
+}
+unsafe impl<K, V, S, A> Sync for OccupiedEntry<'_, K, V, S, A>
+where
+    K: Sync,
+    V: Sync,
+    S: Sync,
+    A: Sync + Allocator + Clone,
+{
+}
+
+impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("OccupiedEntry")
+            .field("key", self.key())
+            .field("value", self.get())
+            .finish()
+    }
+}
+
+/// A view into a vacant entry in a `HashMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{Entry, HashMap, VacantEntry};
+///
+/// let mut map = HashMap::<&str, i32>::new();
+///
+/// let entry_v: VacantEntry<_, _, _> = match map.entry("a") {
+///     Entry::Vacant(view) => view,
+///     Entry::Occupied(_) => unreachable!(),
+/// };
+/// entry_v.insert(10);
+/// assert!(map[&"a"] == 10 && map.len() == 1);
+///
+/// // Nonexistent key (insert and update)
+/// match map.entry("b") {
+///     Entry::Occupied(_) => unreachable!(),
+///     Entry::Vacant(view) => {
+///         let value = view.insert(2);
+///         assert_eq!(*value, 2);
+///         *value = 20;
+///     }
+/// }
+/// assert!(map[&"b"] == 20 && map.len() == 2);
+/// ```
+pub struct VacantEntry<'a, K, V, S, A: Allocator + Clone = Global> {
+    hash: u64,
+    key: K,
+    table: &'a mut HashMap<K, V, S, A>,
+}
+
+impl<K: Debug, V, S, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("VacantEntry").field(self.key()).finish()
+    }
+}
+
+/// A view into a single entry in a map, which may either be vacant or occupied,
+/// with any borrowed form of the map's key type.
+///
+/// This `enum` is constructed from the [`entry_ref`] method on [`HashMap`].
+///
+/// [`Hash`] and [`Eq`] on the borrowed form of the map's key type *must* match those
+/// for the key type. It also requires that the key may be constructed from the borrowed
+/// form through the [`From`] trait.
+///
+/// [`HashMap`]: struct.HashMap.html
+/// [`entry_ref`]: struct.HashMap.html#method.entry_ref
+/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
+/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{EntryRef, HashMap, OccupiedEntryRef};
+///
+/// let mut map = HashMap::new();
+/// map.extend([("a".to_owned(), 10), ("b".into(), 20), ("c".into(), 30)]);
+/// assert_eq!(map.len(), 3);
+///
+/// // Existing key (insert)
+/// let key = String::from("a");
+/// let entry: EntryRef<_, _, _, _> = map.entry_ref(&key);
+/// let _raw_o: OccupiedEntryRef<_, _, _, _> = entry.insert(1);
+/// assert_eq!(map.len(), 3);
+/// // Nonexistent key (insert)
+/// map.entry_ref("d").insert(4);
+///
+/// // Existing key (or_insert)
+/// let v = map.entry_ref("b").or_insert(2);
+/// assert_eq!(std::mem::replace(v, 2), 20);
+/// // Nonexistent key (or_insert)
+/// map.entry_ref("e").or_insert(5);
+///
+/// // Existing key (or_insert_with)
+/// let v = map.entry_ref("c").or_insert_with(|| 3);
+/// assert_eq!(std::mem::replace(v, 3), 30);
+/// // Nonexistent key (or_insert_with)
+/// map.entry_ref("f").or_insert_with(|| 6);
+///
+/// println!("Our HashMap: {:?}", map);
+///
+/// for (key, value) in ["a", "b", "c", "d", "e", "f"].into_iter().zip(1..=6) {
+///     assert_eq!(map[key], value)
+/// }
+/// assert_eq!(map.len(), 6);
+/// ```
+pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global>
+where
+    A: Allocator + Clone,
+{
+    /// An occupied entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    /// let mut map: HashMap<_, _> = [("a".to_owned(), 100), ("b".into(), 200)].into();
+    ///
+    /// match map.entry_ref("a") {
+    ///     EntryRef::Vacant(_) => unreachable!(),
+    ///     EntryRef::Occupied(_) => { }
+    /// }
+    /// ```
+    Occupied(OccupiedEntryRef<'a, 'b, K, Q, V, S, A>),
+
+    /// A vacant entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    /// let mut map: HashMap<String, i32> = HashMap::new();
+    ///
+    /// match map.entry_ref("a") {
+    ///     EntryRef::Occupied(_) => unreachable!(),
+    ///     EntryRef::Vacant(_) => { }
+    /// }
+    /// ```
+    Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>),
+}
+
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
+    for EntryRef<'_, '_, K, Q, V, S, A>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            EntryRef::Vacant(ref v) => f.debug_tuple("EntryRef").field(v).finish(),
+            EntryRef::Occupied(ref o) => f.debug_tuple("EntryRef").field(o).finish(),
+        }
+    }
+}
+
+/// A key held by an `EntryRef`: either a borrowed form of the key (not yet in
+/// the map) or an owned key (already in, or converted from, the borrowed form).
+enum KeyOrRef<'a, K, Q: ?Sized> {
+    Borrowed(&'a Q),
+    Owned(K),
+}
+
+impl<'a, K, Q: ?Sized> KeyOrRef<'a, K, Q> {
+    /// Converts the borrowed form into an owned key via `From`, or returns the
+    /// owned key unchanged.
+    fn into_owned(self) -> K
+    where
+        K: From<&'a Q>,
+    {
+        match self {
+            Self::Borrowed(borrowed) => borrowed.into(),
+            Self::Owned(owned) => owned,
+        }
+    }
+}
+
+impl<'a, K: Borrow<Q>, Q: ?Sized> AsRef<Q> for KeyOrRef<'a, K, Q> {
+    fn as_ref(&self) -> &Q {
+        match self {
+            Self::Borrowed(borrowed) => borrowed,
+            Self::Owned(owned) => owned.borrow(),
+        }
+    }
+}
+
+/// A view into an occupied entry in a `HashMap`.
+/// It is part of the [`EntryRef`] enum.
+///
+/// [`EntryRef`]: enum.EntryRef.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{EntryRef, HashMap, OccupiedEntryRef};
+///
+/// let mut map = HashMap::new();
+/// map.extend([("a".to_owned(), 10), ("b".into(), 20), ("c".into(), 30)]);
+///
+/// let key = String::from("a");
+/// let _entry_o: OccupiedEntryRef<_, _, _, _> = map.entry_ref(&key).insert(100);
+/// assert_eq!(map.len(), 3);
+///
+/// // Existing key (insert and update)
+/// match map.entry_ref("a") {
+///     EntryRef::Vacant(_) => unreachable!(),
+///     EntryRef::Occupied(mut view) => {
+///         assert_eq!(view.get(), &100);
+///         let v = view.get_mut();
+///         *v *= 10;
+///         assert_eq!(view.insert(1111), 1000);
+///     }
+/// }
+///
+/// assert_eq!(map["a"], 1111);
+/// assert_eq!(map.len(), 3);
+///
+/// // Existing key (take)
+/// match map.entry_ref("c") {
+///     EntryRef::Vacant(_) => unreachable!(),
+///     EntryRef::Occupied(view) => {
+///         assert_eq!(view.remove_entry(), ("c".to_owned(), 30));
+///     }
+/// }
+/// assert_eq!(map.get("c"), None);
+/// assert_eq!(map.len(), 2);
+/// ```
+pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> {
+    hash: u64,
+    key: Option<KeyOrRef<'b, K, Q>>,
+    elem: Bucket<(K, V)>,
+    table: &'a mut HashMap<K, V, S, A>,
+}
+
+unsafe impl<'a, 'b, K, Q, V, S, A> Send for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+where
+    K: Send,
+    Q: Sync + ?Sized,
+    V: Send,
+    S: Send,
+    A: Send + Allocator + Clone,
+{
+}
+unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+where
+    K: Sync,
+    Q: Sync + ?Sized,
+    V: Sync,
+    S: Sync,
+    A: Sync + Allocator + Clone,
+{
+}
+
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
+    for OccupiedEntryRef<'_, '_, K, Q, V, S, A>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("OccupiedEntryRef")
+            .field("key", &self.key())
+            .field("value", &self.get())
+            .finish()
+    }
+}
+
+/// A view into a vacant entry in a `HashMap`.
+/// It is part of the [`EntryRef`] enum.
+///
+/// [`EntryRef`]: enum.EntryRef.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{EntryRef, HashMap, VacantEntryRef};
+///
+/// let mut map = HashMap::<String, i32>::new();
+///
+/// let entry_v: VacantEntryRef<_, _, _, _> = match map.entry_ref("a") {
+///     EntryRef::Vacant(view) => view,
+///     EntryRef::Occupied(_) => unreachable!(),
+/// };
+/// entry_v.insert(10);
+/// assert!(map["a"] == 10 && map.len() == 1);
+///
+/// // Nonexistent key (insert and update)
+/// match map.entry_ref("b") {
+///     EntryRef::Occupied(_) => unreachable!(),
+///     EntryRef::Vacant(view) => {
+///         let value = view.insert(2);
+///         assert_eq!(*value, 2);
+///         *value = 20;
+///     }
+/// }
+/// assert!(map["b"] == 20 && map.len() == 2);
+/// ```
+pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> {
+    hash: u64,
+    key: KeyOrRef<'b, K, Q>,
+    table: &'a mut HashMap<K, V, S, A>,
+}
+
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug
+    for VacantEntryRef<'_, '_, K, Q, V, S, A>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("VacantEntryRef").field(&self.key()).finish()
+    }
+}
+
+/// The error returned by [`try_insert`](HashMap::try_insert) when the key already exists.
+///
+/// Contains the occupied entry, and the value that was not inserted.
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_map::{HashMap, OccupiedError};
+///
+/// let mut map: HashMap<_, _> = [("a", 10), ("b", 20)].into();
+///
+/// // try_insert returns a mutable reference to the value if the key was vacant,
+/// // but if the map already had the key present, nothing is updated, and the
+/// // provided value is returned inside the `Err(_)` variant
+/// match map.try_insert("a", 100) {
+///     Err(OccupiedError { mut entry, value }) => {
+///         assert_eq!(entry.key(), &"a");
+///         assert_eq!(value, 100);
+///         assert_eq!(entry.insert(100), 10)
+///     }
+///     _ => unreachable!(),
+/// }
+/// assert_eq!(map[&"a"], 100);
+/// ```
+pub struct OccupiedError<'a, K, V, S, A: Allocator + Clone = Global> {
+    /// The entry in the map that was already occupied.
+    pub entry: OccupiedEntry<'a, K, V, S, A>,
+    /// The value which was not inserted, because the entry was already occupied.
+    pub value: V,
+}
+
+impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedError<'_, K, V, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("OccupiedError")
+            .field("key", self.entry.key())
+            .field("old_value", self.entry.get())
+            .field("new_value", &self.value)
+            .finish()
+    }
+}
+
+impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display
+    for OccupiedError<'a, K, V, S, A>
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "failed to insert {:?}, key {:?} already exists with value {:?}",
+            self.value,
+            self.entry.key(),
+            self.entry.get(),
+        )
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap<K, V, S, A> {
+    type Item = (&'a K, &'a V);
+    type IntoIter = Iter<'a, K, V>;
+
+    /// Creates an iterator over the entries of a `HashMap` in arbitrary order.
+    /// The iterator element type is `(&'a K, &'a V)`.
+    ///
+    /// Returns the same `Iter` struct as the [`iter`] method on [`HashMap`].
+    ///
+    /// [`iter`]: struct.HashMap.html#method.iter
+    /// [`HashMap`]: struct.HashMap.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// let map_one: HashMap<_, _> = [(1, "a"), (2, "b"), (3, "c")].into();
+    /// let mut map_two = HashMap::new();
+    ///
+    /// for (key, value) in &map_one {
+    ///     println!("Key: {}, Value: {}", key, value);
+    ///     map_two.insert_unique_unchecked(*key, *value);
+    /// }
+    ///
+    /// assert_eq!(map_one, map_two);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_iter(self) -> Iter<'a, K, V> {
+        self.iter()
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap<K, V, S, A> {
+    type Item = (&'a K, &'a mut V);
+    type IntoIter = IterMut<'a, K, V>;
+
+    /// Creates an iterator over the entries of a `HashMap` in arbitrary order
+    /// with mutable references to the values. The iterator element type is
+    /// `(&'a K, &'a mut V)`.
+    ///
+    /// Returns the same `IterMut` struct as the [`iter_mut`] method on
+    /// [`HashMap`].
+    ///
+    /// [`iter_mut`]: struct.HashMap.html#method.iter_mut
+    /// [`HashMap`]: struct.HashMap.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// let mut map: HashMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into();
+    ///
+    /// for (key, value) in &mut map {
+    ///     println!("Key: {}, Value: {}", key, value);
+    ///     *value *= 2;
+    /// }
+    ///
+    /// let mut vec = map.iter().collect::<Vec<_>>();
+    /// // The `Iter` iterator produces items in arbitrary order, so the
+    /// // items must be sorted to test them against a sorted array.
+    /// vec.sort_unstable();
+    /// assert_eq!(vec, [(&"a", &2), (&"b", &4), (&"c", &6)]);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_iter(self) -> IterMut<'a, K, V> {
+        self.iter_mut()
+    }
+}
+
+impl<K, V, S, A: Allocator + Clone> IntoIterator for HashMap<K, V, S, A> {
+    type Item = (K, V);
+    type IntoIter = IntoIter<K, V, A>;
+
+    /// Creates a consuming iterator, that is, one that moves each key-value
+    /// pair out of the map in arbitrary order. The map cannot be used after
+    /// calling this.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let map: HashMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into();
+    ///
+    /// // Not possible with .iter()
+    /// let mut vec: Vec<(&str, i32)> = map.into_iter().collect();
+    /// // The `IntoIter` iterator produces items in arbitrary order, so
+    /// // the items must be sorted to test them against a sorted array.
+    /// vec.sort_unstable();
+    /// assert_eq!(vec, [("a", 1), ("b", 2), ("c", 3)]);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_iter(self) -> IntoIter<K, V, A> {
+        IntoIter {
+            inner: self.table.into_iter(),
+        }
+    }
+}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+    type Item = (&'a K, &'a V);
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<(&'a K, &'a V)> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some(x) => unsafe {
+                let r = x.as_ref();
+                Some((&r.0, &r.1))
+            },
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+    type Item = (&'a K, &'a mut V);
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some(x) => unsafe {
+                let r = x.as_mut();
+                Some((&r.0, &mut r.1))
+            },
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+impl<K, V> fmt::Debug for IterMut<'_, K, V>
+where
+    K: fmt::Debug,
+    V: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> {
+    type Item = (K, V);
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<(K, V)> {
+        self.inner.next()
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, V, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoIter<K, V, A> {}
+
+impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoIter<K, V, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+    type Item = &'a K;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a K> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some((k, _)) => Some(k),
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+    type Item = &'a V;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a V> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some((_, v)) => Some(v),
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+    type Item = &'a mut V;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a mut V> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.inner.next() {
+            Some((_, v)) => Some(v),
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+impl<K, V: Debug> fmt::Debug for ValuesMut<'_, K, V> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list()
+            .entries(self.inner.iter().map(|(_, val)| val))
+            .finish()
+    }
+}
+
+impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> {
+    type Item = (K, V);
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<(K, V)> {
+        self.inner.next()
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for Drain<'_, K, V, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<K, V, A: Allocator + Clone> FusedIterator for Drain<'_, K, V, A> {}
+
+impl<K, V, A> fmt::Debug for Drain<'_, K, V, A>
+where
+    K: fmt::Debug,
+    V: fmt::Debug,
+    A: Allocator + Clone,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> {
+    /// Sets the value of the entry, and returns an OccupiedEntry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// let entry = map.entry("horseyland").insert(37);
+    ///
+    /// assert_eq!(entry.key(), &"horseyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, value: V) -> OccupiedEntry<'a, K, V, S, A>
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            Entry::Occupied(mut entry) => {
+                entry.insert(value);
+                entry
+            }
+            Entry::Vacant(entry) => entry.insert_entry(value),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the default if empty, and returns
+    /// a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry("poneyland").or_insert(3);
+    /// assert_eq!(map["poneyland"], 3);
+    ///
+    /// // existing key
+    /// *map.entry("poneyland").or_insert(10) *= 2;
+    /// assert_eq!(map["poneyland"], 6);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert(self, default: V) -> &'a mut V
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(default),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the result of the default function if empty,
+    /// and returns a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry("poneyland").or_insert_with(|| 3);
+    /// assert_eq!(map["poneyland"], 3);
+    ///
+    /// // existing key
+    /// *map.entry("poneyland").or_insert_with(|| 10) *= 2;
+    /// assert_eq!(map["poneyland"], 6);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(default()),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+    /// This method allows for generating key-derived values for insertion by providing the default
+    /// function a reference to the key that was moved during the `.entry(key)` method call.
+    ///
+    /// The reference to the moved key is provided so that cloning or copying the key is
+    /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, usize> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count());
+    /// assert_eq!(map["poneyland"], 9);
+    ///
+    /// // existing key
+    /// *map.entry("poneyland").or_insert_with_key(|key| key.chars().count() * 10) *= 2;
+    /// assert_eq!(map["poneyland"], 18);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => {
+                let value = default(entry.key());
+                entry.insert(value)
+            }
+        }
+    }
+
+    /// Returns a reference to this entry's key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.entry("poneyland").or_insert(3);
+    /// // existing key
+    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+    /// // nonexistent key
+    /// assert_eq!(map.entry("horseland").key(), &"horseland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &K {
+        match *self {
+            Entry::Occupied(ref entry) => entry.key(),
+            Entry::Vacant(ref entry) => entry.key(),
+        }
+    }
+
+    /// Provides in-place mutable access to an occupied entry before any
+    /// potential inserts into the map.
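+    ///
+    /// The closure runs only when the entry is occupied; a vacant entry is passed
+    /// through unchanged, which lets `and_modify` chain naturally with
+    /// [`or_insert`](Entry::or_insert).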
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// map.entry("poneyland")
+    ///    .and_modify(|e| { *e += 1 })
+    ///    .or_insert(42);
+    /// assert_eq!(map["poneyland"], 42);
+    ///
+    /// map.entry("poneyland")
+    ///    .and_modify(|e| { *e += 1 })
+    ///    .or_insert(42);
+    /// assert_eq!(map["poneyland"], 43);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn and_modify<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&mut V),
+    {
+        match self {
+            Entry::Occupied(mut entry) => {
+                f(entry.get_mut());
+                Entry::Occupied(entry)
+            }
+            Entry::Vacant(entry) => Entry::Vacant(entry),
+        }
+    }
+
+    /// Provides shared access to the key and owned access to the value of
+    /// an occupied entry and allows replacing or removing it based on the
+    /// value of the returned option.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// let entry = map
+    ///     .entry("poneyland")
+    ///     .and_replace_entry_with(|_k, _v| panic!());
+    ///
+    /// match entry {
+    ///     Entry::Vacant(e) => {
+    ///         assert_eq!(e.key(), &"poneyland");
+    ///     }
+    ///     Entry::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// map.insert("poneyland", 42);
+    ///
+    /// let entry = map
+    ///     .entry("poneyland")
+    ///     .and_replace_entry_with(|k, v| {
+    ///         assert_eq!(k, &"poneyland");
+    ///         assert_eq!(v, 42);
+    ///         Some(v + 1)
+    ///     });
+    ///
+    /// match entry {
+    ///     Entry::Occupied(e) => {
+    ///         assert_eq!(e.key(), &"poneyland");
+    ///         assert_eq!(e.get(), &43);
+    ///     }
+    ///     Entry::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 43);
+    ///
+    /// let entry = map
+    ///     .entry("poneyland")
+    ///     .and_replace_entry_with(|_k, _v| None);
+    ///
+    /// match entry {
+    ///     Entry::Vacant(e) => assert_eq!(e.key(), &"poneyland"),
+    ///     Entry::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// assert!(!map.contains_key("poneyland"));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn and_replace_entry_with<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&K, V) -> Option<V>,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.replace_entry_with(f),
+            Entry::Vacant(_) => self,
+        }
+    }
+}
+
+impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> {
+    /// Ensures a value is in the entry by inserting the default value if empty,
+    /// and returns a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry("poneyland").or_default();
+    /// assert_eq!(map["poneyland"], None);
+    ///
+    /// map.insert("horseland", Some(3));
+    ///
+    /// // existing key
+    /// assert_eq!(map.entry("horseland").or_default(), &mut Some(3));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_default(self) -> &'a mut V
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            Entry::Occupied(entry) => entry.into_mut(),
+            Entry::Vacant(entry) => entry.insert(Default::default()),
+        }
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
+    /// Gets a reference to the key in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.entry("poneyland").or_insert(12);
+    ///
+    /// match map.entry("poneyland") {
+    ///     Entry::Vacant(_) => panic!(),
+    ///     Entry::Occupied(entry) => assert_eq!(entry.key(), &"poneyland"),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &K {
+        unsafe { &self.elem.as_ref().0 }
+    }
+
+    /// Takes ownership of the key and value from the map.
+    /// Keeps the allocated memory for reuse.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// // The map is empty
+    /// assert!(map.is_empty() && map.capacity() == 0);
+    ///
+    /// map.entry("poneyland").or_insert(12);
+    /// let capacity_before_remove = map.capacity();
+    ///
+    /// if let Entry::Occupied(o) = map.entry("poneyland") {
+    ///     // We delete the entry from the map.
+    ///     assert_eq!(o.remove_entry(), ("poneyland", 12));
+    /// }
+    ///
+    /// assert_eq!(map.contains_key("poneyland"), false);
+    /// // Now the map holds no elements, but its capacity equals the old one
+    /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove_entry(self) -> (K, V) {
+        unsafe { self.table.table.remove(self.elem) }
+    }
+
+    /// Gets a reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.entry("poneyland").or_insert(12);
+    ///
+    /// match map.entry("poneyland") {
+    ///     Entry::Vacant(_) => panic!(),
+    ///     Entry::Occupied(entry) => assert_eq!(entry.get(), &12),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get(&self) -> &V {
+        unsafe { &self.elem.as_ref().1 }
+    }
+
+    /// Gets a mutable reference to the value in the entry.
+    ///
+    /// If you need a reference to the `OccupiedEntry` which may outlive the
+    /// destruction of the `Entry` value, see [`into_mut`].
+    ///
+    /// [`into_mut`]: #method.into_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.entry("poneyland").or_insert(12);
+    ///
+    /// assert_eq!(map["poneyland"], 12);
+    /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+    ///     *o.get_mut() += 10;
+    ///     assert_eq!(*o.get(), 22);
+    ///
+    ///     // We can use the same Entry multiple times.
+    ///     *o.get_mut() += 2;
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 24);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_mut(&mut self) -> &mut V {
+        unsafe { &mut self.elem.as_mut().1 }
+    }
+
+    /// Converts the OccupiedEntry into a mutable reference to the value in the entry
+    /// with a lifetime bound to the map itself.
+    ///
+    /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
+    ///
+    /// [`get_mut`]: #method.get_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.entry("poneyland").or_insert(12);
+    ///
+    /// assert_eq!(map["poneyland"], 12);
+    ///
+    /// let value: &mut u32;
+    /// match map.entry("poneyland") {
+    ///     Entry::Occupied(entry) => value = entry.into_mut(),
+    ///     Entry::Vacant(_) => panic!(),
+    /// }
+    /// *value += 10;
+    ///
+    /// assert_eq!(map["poneyland"], 22);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_mut(self) -> &'a mut V {
+        unsafe { &mut self.elem.as_mut().1 }
+    }
+
+    /// Sets the value of the entry, and returns the entry's old value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.entry("poneyland").or_insert(12);
+    ///
+    /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+    ///     assert_eq!(o.insert(15), 12);
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 15);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(&mut self, value: V) -> V {
+        mem::replace(self.get_mut(), value)
+    }
+
+    /// Takes the value out of the entry, and returns it.
+    /// Keeps the allocated memory for reuse.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// // The map is empty
+    /// assert!(map.is_empty() && map.capacity() == 0);
+    ///
+    /// map.entry("poneyland").or_insert(12);
+    /// let capacity_before_remove = map.capacity();
+    ///
+    /// if let Entry::Occupied(o) = map.entry("poneyland") {
+    ///     assert_eq!(o.remove(), 12);
+    /// }
+    ///
+    /// assert_eq!(map.contains_key("poneyland"), false);
+    /// // Now the map holds no elements, but its capacity equals the old one
+    /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove(self) -> V {
+        self.remove_entry().1
+    }
+
+    /// Replaces the entry, returning the old key and value. The new key in the hash map will be
+    /// the key used to create this entry.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if this OccupiedEntry was created through [`Entry::insert`].
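+    ///
+    /// Entries created through [`Entry::insert`] keep no owned copy of the caller's key
+    /// (the internal `key` field is `None`), so there is no replacement key to install.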
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    /// use std::rc::Rc;
+    ///
+    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
+    /// let key_one = Rc::new("Stringthing".to_string());
+    /// let key_two = Rc::new("Stringthing".to_string());
+    ///
+    /// map.insert(key_one.clone(), 15);
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// match map.entry(key_two.clone()) {
+    ///     Entry::Occupied(entry) => {
+    ///         let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
+    ///         assert!(Rc::ptr_eq(&key_one, &old_key) && old_value == 15);
+    ///     }
+    ///     Entry::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// assert_eq!(map[&"Stringthing".to_owned()], 16);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_entry(self, value: V) -> (K, V) {
+        let entry = unsafe { self.elem.as_mut() };
+
+        let old_key = mem::replace(&mut entry.0, self.key.unwrap());
+        let old_value = mem::replace(&mut entry.1, value);
+
+        (old_key, old_value)
+    }
+
+    /// Replaces the key in the hash map with the key used to create this entry.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if this OccupiedEntry was created through [`Entry::insert`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    /// use std::rc::Rc;
+    ///
+    /// let mut map: HashMap<Rc<String>, usize> = HashMap::with_capacity(6);
+    /// let mut keys_one: Vec<Rc<String>> = Vec::with_capacity(6);
+    /// let mut keys_two: Vec<Rc<String>> = Vec::with_capacity(6);
+    ///
+    /// for (value, key) in ["a", "b", "c", "d", "e", "f"].into_iter().enumerate() {
+    ///     let rc_key = Rc::new(key.to_owned());
+    ///     keys_one.push(rc_key.clone());
+    ///     map.insert(rc_key.clone(), value);
+    ///     keys_two.push(Rc::new(key.to_owned()));
+    /// }
+    ///
+    /// assert!(
+    ///     keys_one.iter().all(|key| Rc::strong_count(key) == 2)
+    ///         && keys_two.iter().all(|key| Rc::strong_count(key) == 1)
+    /// );
+    ///
+    /// reclaim_memory(&mut map, &keys_two);
+    ///
+    /// assert!(
+    ///     keys_one.iter().all(|key| Rc::strong_count(key) == 1)
+    ///         && keys_two.iter().all(|key| Rc::strong_count(key) == 2)
+    /// );
+    ///
+    /// fn reclaim_memory(map: &mut HashMap<Rc<String>, usize>, keys: &[Rc<String>]) {
+    ///     for key in keys {
+    ///         if let Entry::Occupied(entry) = map.entry(key.clone()) {
+    ///             // Replaces the entry's key with our version of it in `keys`.
+    ///             entry.replace_key();
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_key(self) -> K {
+        let entry = unsafe { self.elem.as_mut() };
+        mem::replace(&mut entry.0, self.key.unwrap())
+    }
+
+    /// Provides shared access to the key and owned access to the value of
+    /// the entry and allows replacing or removing it based on the
+    /// value of the returned option.
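+    /// Returning `Some(new_value)` keeps the entry occupied with `new_value`;
+    /// returning `None` removes the entry and yields it back as `Entry::Vacant`.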
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// map.insert("poneyland", 42);
+    ///
+    /// let entry = match map.entry("poneyland") {
+    ///     Entry::Occupied(e) => {
+    ///         e.replace_entry_with(|k, v| {
+    ///             assert_eq!(k, &"poneyland");
+    ///             assert_eq!(v, 42);
+    ///             Some(v + 1)
+    ///         })
+    ///     }
+    ///     Entry::Vacant(_) => panic!(),
+    /// };
+    ///
+    /// match entry {
+    ///     Entry::Occupied(e) => {
+    ///         assert_eq!(e.key(), &"poneyland");
+    ///         assert_eq!(e.get(), &43);
+    ///     }
+    ///     Entry::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 43);
+    ///
+    /// let entry = match map.entry("poneyland") {
+    ///     Entry::Occupied(e) => e.replace_entry_with(|_k, _v| None),
+    ///     Entry::Vacant(_) => panic!(),
+    /// };
+    ///
+    /// match entry {
+    ///     Entry::Vacant(e) => {
+    ///         assert_eq!(e.key(), &"poneyland");
+    ///     }
+    ///     Entry::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// assert!(!map.contains_key("poneyland"));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_entry_with<F>(self, f: F) -> Entry<'a, K, V, S, A>
+    where
+        F: FnOnce(&K, V) -> Option<V>,
+    {
+        unsafe {
+            let mut spare_key = None;
+
+            self.table
+                .table
+                .replace_bucket_with(self.elem.clone(), |(key, value)| {
+                    if let Some(new_value) = f(&key, value) {
+                        Some((key, new_value))
+                    } else {
+                        spare_key = Some(key);
+                        None
+                    }
+                });
+
+            if let Some(key) = spare_key {
+                Entry::Vacant(VacantEntry {
+                    hash: self.hash,
+                    key,
+                    table: self.table,
+                })
+            } else {
+                Entry::Occupied(self)
+            }
+        }
+    }
+}
+
+impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
+    /// Gets a reference to the key that would be used when inserting a value
+    /// through the `VacantEntry`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+
+    /// Take ownership of the key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{Entry, HashMap};
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// match map.entry("poneyland") {
+    ///     Entry::Occupied(_) => panic!(),
+    ///     Entry::Vacant(v) => assert_eq!(v.into_key(), "poneyland"),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_key(self) -> K {
+        self.key
+    }
+
+    /// Sets the value of the entry with the VacantEntry's key,
+    /// and returns a mutable reference to it.
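+    ///
+    /// The hash computed when the entry was created is reused, so the key is
+    /// not hashed a second time on insertion.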
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::Entry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// if let Entry::Vacant(o) = map.entry("poneyland") {
+    ///     o.insert(37);
+    /// }
+    /// assert_eq!(map["poneyland"], 37);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, value: V) -> &'a mut V
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        let table = &mut self.table.table;
+        let entry = table.insert_entry(
+            self.hash,
+            (self.key, value),
+            make_hasher::<_, V, S>(&self.table.hash_builder),
+        );
+        &mut entry.1
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub(crate) fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V, S, A>
+    where
+        K: Hash,
+        S: BuildHasher,
+    {
+        let elem = self.table.table.insert(
+            self.hash,
+            (self.key, value),
+            make_hasher::<_, V, S>(&self.table.hash_builder),
+        );
+        OccupiedEntry {
+            hash: self.hash,
+            key: None,
+            elem,
+            table: self.table,
+        }
+    }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> {
+    /// Sets the value of the entry, and returns an OccupiedEntryRef.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// let entry = map.entry_ref("horseyland").insert(37);
+    ///
+    /// assert_eq!(entry.key(), "horseyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, value: V) -> OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        match self {
+            EntryRef::Occupied(mut entry) => {
+                entry.insert(value);
+                entry
+            }
+            EntryRef::Vacant(entry) => entry.insert_entry(value),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the default if empty, and returns
+    /// a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry_ref("poneyland").or_insert(3);
+    /// assert_eq!(map["poneyland"], 3);
+    ///
+    /// // existing key
+    /// *map.entry_ref("poneyland").or_insert(10) *= 2;
+    /// assert_eq!(map["poneyland"], 6);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert(self, default: V) -> &'a mut V
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        match self {
+            EntryRef::Occupied(entry) => entry.into_mut(),
+            EntryRef::Vacant(entry) => entry.insert(default),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting the result of the default function if empty,
+    /// and returns a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry_ref("poneyland").or_insert_with(|| 3);
+    /// assert_eq!(map["poneyland"], 3);
+    ///
+    /// // existing key
+    /// *map.entry_ref("poneyland").or_insert_with(|| 10) *= 2;
+    /// assert_eq!(map["poneyland"], 6);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        match self {
+            EntryRef::Occupied(entry) => entry.into_mut(),
+            EntryRef::Vacant(entry) => entry.insert(default()),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+    /// This method allows for generating key-derived values for insertion by providing the default
+    /// function a reference to the key that was moved during the `.entry_ref(key)` method call.
+    ///
+    /// The reference to the moved key is provided so that cloning or copying the key is
+    /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, usize> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count());
+    /// assert_eq!(map["poneyland"], 9);
+    ///
+    /// // existing key
+    /// *map.entry_ref("poneyland").or_insert_with_key(|key| key.chars().count() * 10) *= 2;
+    /// assert_eq!(map["poneyland"], 18);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert_with_key<F: FnOnce(&Q) -> V>(self, default: F) -> &'a mut V
+    where
+        K: Hash + Borrow<Q> + From<&'b Q>,
+        S: BuildHasher,
+    {
+        match self {
+            EntryRef::Occupied(entry) => entry.into_mut(),
+            EntryRef::Vacant(entry) => {
+                let value = default(entry.key.as_ref());
+                entry.insert(value)
+            }
+        }
+    }
+
+    /// Returns a reference to this entry's key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.entry_ref("poneyland").or_insert(3);
+    /// // existing key
+    /// assert_eq!(map.entry_ref("poneyland").key(), "poneyland");
+    /// // nonexistent key
+    /// assert_eq!(map.entry_ref("horseland").key(), "horseland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &Q
+    where
+        K: Borrow<Q>,
+    {
+        match *self {
+            EntryRef::Occupied(ref entry) => entry.key(),
+            EntryRef::Vacant(ref entry) => entry.key(),
+        }
+    }
+
+    /// Provides in-place mutable access to an occupied entry before any
+    /// potential inserts into the map.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    ///
+    /// map.entry_ref("poneyland")
+    ///     .and_modify(|e| { *e += 1 })
+    ///     .or_insert(42);
+    /// assert_eq!(map["poneyland"], 42);
+    ///
+    /// map.entry_ref("poneyland")
+    ///     .and_modify(|e| { *e += 1 })
+    ///     .or_insert(42);
+    /// assert_eq!(map["poneyland"], 43);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn and_modify<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&mut V),
+    {
+        match self {
+            EntryRef::Occupied(mut entry) => {
+                f(entry.get_mut());
+                EntryRef::Occupied(entry)
+            }
+            EntryRef::Vacant(entry) => EntryRef::Vacant(entry),
+        }
+    }
+
+    /// Provides shared access to the key and owned access to the value of
+    /// an occupied entry and allows replacing or removing it based on the
+    /// value of the returned option.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    ///
+    /// let entry = map
+    ///     .entry_ref("poneyland")
+    ///     .and_replace_entry_with(|_k, _v| panic!());
+    ///
+    /// match entry {
+    ///     EntryRef::Vacant(e) => {
+    ///         assert_eq!(e.key(), "poneyland");
+    ///     }
+    ///     EntryRef::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// map.insert("poneyland".to_string(), 42);
+    ///
+    /// let entry = map
+    ///     .entry_ref("poneyland")
+    ///     .and_replace_entry_with(|k, v| {
+    ///         assert_eq!(k, "poneyland");
+    ///         assert_eq!(v, 42);
+    ///         Some(v + 1)
+    ///     });
+    ///
+    /// match entry {
+    ///     EntryRef::Occupied(e) => {
+    ///         assert_eq!(e.key(), "poneyland");
+    ///         assert_eq!(e.get(), &43);
+    ///     }
+    ///     EntryRef::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 43);
+    ///
+    /// let entry = map
+    ///     .entry_ref("poneyland")
+    ///     .and_replace_entry_with(|_k, _v| None);
+    ///
+    /// match entry {
+    ///     EntryRef::Vacant(e) => assert_eq!(e.key(), "poneyland"),
+    ///     EntryRef::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// assert!(!map.contains_key("poneyland"));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn and_replace_entry_with<F>(self, f: F) -> Self
+    where
+        F: FnOnce(&Q, V) -> Option<V>,
+        K: Borrow<Q>,
+    {
+        match self {
+            EntryRef::Occupied(entry) => entry.replace_entry_with(f),
+            EntryRef::Vacant(_) => self,
+        }
+    }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> {
+    /// Ensures a value is in the entry by inserting the default value if empty,
+    /// and returns a mutable reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, Option<u32>> = HashMap::new();
+    ///
+    /// // nonexistent key
+    /// map.entry_ref("poneyland").or_default();
+    /// assert_eq!(map["poneyland"], None);
+    ///
+    /// map.insert("horseland".to_string(), Some(3));
+    ///
+    /// // existing key
+    /// assert_eq!(map.entry_ref("horseland").or_default(), &mut Some(3));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_default(self) -> &'a mut V
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        match self {
+            EntryRef::Occupied(entry) => entry.into_mut(),
+            EntryRef::Vacant(entry) => entry.insert(Default::default()),
+        }
+    }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> {
+    /// Gets a reference to the key in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.entry_ref("poneyland").or_insert(12);
+    ///
+    /// match map.entry_ref("poneyland") {
+    ///     EntryRef::Vacant(_) => panic!(),
+    ///     EntryRef::Occupied(entry) => assert_eq!(entry.key(), "poneyland"),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &Q
+    where
+        K: Borrow<Q>,
+    {
+        unsafe { &self.elem.as_ref().0 }.borrow()
+    }
+
+    /// Take the ownership of the key and value from the map.
+    /// Keeps the allocated memory for reuse.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// // The map is empty
+    /// assert!(map.is_empty() && map.capacity() == 0);
+    ///
+    /// map.entry_ref("poneyland").or_insert(12);
+    /// let capacity_before_remove = map.capacity();
+    ///
+    /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
+    ///     // We delete the entry from the map.
+    ///     assert_eq!(o.remove_entry(), ("poneyland".to_owned(), 12));
+    /// }
+    ///
+    /// assert_eq!(map.contains_key("poneyland"), false);
+    /// // Now the map holds no elements, but its capacity equals the old one
+    /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove_entry(self) -> (K, V) {
+        unsafe { self.table.table.remove(self.elem) }
+    }
+
+    /// Gets a reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.entry_ref("poneyland").or_insert(12);
+    ///
+    /// match map.entry_ref("poneyland") {
+    ///     EntryRef::Vacant(_) => panic!(),
+    ///     EntryRef::Occupied(entry) => assert_eq!(entry.get(), &12),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get(&self) -> &V {
+        unsafe { &self.elem.as_ref().1 }
+    }
+
+    /// Gets a mutable reference to the value in the entry.
+    ///
+    /// If you need a reference to the `OccupiedEntryRef` which may outlive the
+    /// destruction of the `EntryRef` value, see [`into_mut`].
+    ///
+    /// [`into_mut`]: #method.into_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.entry_ref("poneyland").or_insert(12);
+    ///
+    /// assert_eq!(map["poneyland"], 12);
+    /// if let EntryRef::Occupied(mut o) = map.entry_ref("poneyland") {
+    ///     *o.get_mut() += 10;
+    ///     assert_eq!(*o.get(), 22);
+    ///
+    ///     // We can use the same Entry multiple times.
+    ///     *o.get_mut() += 2;
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 24);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_mut(&mut self) -> &mut V {
+        unsafe { &mut self.elem.as_mut().1 }
+    }
+
+    /// Converts the OccupiedEntryRef into a mutable reference to the value in the entry
+    /// with a lifetime bound to the map itself.
+    ///
+    /// If you need multiple references to the `OccupiedEntryRef`, see [`get_mut`].
+    ///
+    /// [`get_mut`]: #method.get_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.entry_ref("poneyland").or_insert(12);
+    ///
+    /// let value: &mut u32;
+    /// match map.entry_ref("poneyland") {
+    ///     EntryRef::Occupied(entry) => value = entry.into_mut(),
+    ///     EntryRef::Vacant(_) => panic!(),
+    /// }
+    /// *value += 10;
+    ///
+    /// assert_eq!(map["poneyland"], 22);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_mut(self) -> &'a mut V {
+        unsafe { &mut self.elem.as_mut().1 }
+    }
+
+    /// Sets the value of the entry, and returns the entry's old value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.entry_ref("poneyland").or_insert(12);
+    ///
+    /// if let EntryRef::Occupied(mut o) = map.entry_ref("poneyland") {
+    ///     assert_eq!(o.insert(15), 12);
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 15);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(&mut self, value: V) -> V {
+        mem::replace(self.get_mut(), value)
+    }
+
+    /// Takes the value out of the entry, and returns it.
+    /// Keeps the allocated memory for reuse.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// // The map is empty
+    /// assert!(map.is_empty() && map.capacity() == 0);
+    ///
+    /// map.entry_ref("poneyland").or_insert(12);
+    /// let capacity_before_remove = map.capacity();
+    ///
+    /// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
+    ///     assert_eq!(o.remove(), 12);
+    /// }
+    ///
+    /// assert_eq!(map.contains_key("poneyland"), false);
+    /// // Now the map holds no elements, but its capacity equals the old one
+    /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove(self) -> V {
+        self.remove_entry().1
+    }
+
+    /// Replaces the entry, returning the old key and value. The new key in the hash map will be
+    /// the key used to create this entry.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    /// use std::rc::Rc;
+    ///
+    /// let mut map: HashMap<Rc<str>, u32> = HashMap::new();
+    /// let key: Rc<str> = Rc::from("Stringthing");
+    ///
+    /// map.insert(key.clone(), 15);
+    /// assert_eq!(Rc::strong_count(&key), 2);
+    ///
+    /// match map.entry_ref("Stringthing") {
+    ///     EntryRef::Occupied(entry) => {
+    ///         let (old_key, old_value): (Rc<str>, u32) = entry.replace_entry(16);
+    ///         assert!(Rc::ptr_eq(&key, &old_key) && old_value == 15);
+    ///     }
+    ///     EntryRef::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert_eq!(Rc::strong_count(&key), 1);
+    /// assert_eq!(map["Stringthing"], 16);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_entry(self, value: V) -> (K, V)
+    where
+        K: From<&'b Q>,
+    {
+        let entry = unsafe { self.elem.as_mut() };
+
+        let old_key = mem::replace(&mut entry.0, self.key.unwrap().into_owned());
+        let old_value = mem::replace(&mut entry.1, value);
+
+        (old_key, old_value)
+    }
+
+    /// Replaces the key in the hash map with the key used to create this entry.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
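+    /// (`EntryRef::insert` consumes the key, so there is no owned key left to
+    /// swap into the map.)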
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    /// use std::rc::Rc;
+    ///
+    /// let mut map: HashMap<Rc<str>, usize> = HashMap::with_capacity(6);
+    /// let mut keys: Vec<Rc<str>> = Vec::with_capacity(6);
+    ///
+    /// for (value, key) in ["a", "b", "c", "d", "e", "f"].into_iter().enumerate() {
+    ///     let rc_key: Rc<str> = Rc::from(key);
+    ///     keys.push(rc_key.clone());
+    ///     map.insert(rc_key.clone(), value);
+    /// }
+    ///
+    /// assert!(keys.iter().all(|key| Rc::strong_count(key) == 2));
+    ///
+    /// // It doesn't matter that we use a vector of the same keys here, because
+    /// // the replacement keys are newly created from the references.
+    /// reclaim_memory(&mut map, &keys);
+    ///
+    /// assert!(keys.iter().all(|key| Rc::strong_count(key) == 1));
+    ///
+    /// fn reclaim_memory(map: &mut HashMap<Rc<str>, usize>, keys: &[Rc<str>]) {
+    ///     for key in keys {
+    ///         if let EntryRef::Occupied(entry) = map.entry_ref(key.as_ref()) {
+    ///             // Replaces the entry's key with our version of it in `keys`.
+    ///             entry.replace_key();
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_key(self) -> K
+    where
+        K: From<&'b Q>,
+    {
+        let entry = unsafe { self.elem.as_mut() };
+        mem::replace(&mut entry.0, self.key.unwrap().into_owned())
+    }
+
+    /// Provides shared access to the key and owned access to the value of
+    /// the entry and allows replacing or removing it based on the
+    /// value of the returned option.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// map.insert("poneyland".to_string(), 42);
+    ///
+    /// let entry = match map.entry_ref("poneyland") {
+    ///     EntryRef::Occupied(e) => {
+    ///         e.replace_entry_with(|k, v| {
+    ///             assert_eq!(k, "poneyland");
+    ///             assert_eq!(v, 42);
+    ///             Some(v + 1)
+    ///         })
+    ///     }
+    ///     EntryRef::Vacant(_) => panic!(),
+    /// };
+    ///
+    /// match entry {
+    ///     EntryRef::Occupied(e) => {
+    ///         assert_eq!(e.key(), "poneyland");
+    ///         assert_eq!(e.get(), &43);
+    ///     }
+    ///     EntryRef::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert_eq!(map["poneyland"], 43);
+    ///
+    /// let entry = match map.entry_ref("poneyland") {
+    ///     EntryRef::Occupied(e) => e.replace_entry_with(|_k, _v| None),
+    ///     EntryRef::Vacant(_) => panic!(),
+    /// };
+    ///
+    /// match entry {
+    ///     EntryRef::Vacant(e) => {
+    ///         assert_eq!(e.key(), "poneyland");
+    ///     }
+    ///     EntryRef::Occupied(_) => panic!(),
+    /// }
+    ///
+    /// assert!(!map.contains_key("poneyland"));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_entry_with<F>(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A>
+    where
+        F: FnOnce(&Q, V) -> Option<V>,
+        K: Borrow<Q>,
+    {
+        unsafe {
+            let mut spare_key = None;
+
+            self.table
+                .table
+                .replace_bucket_with(self.elem.clone(), |(key, value)| {
+                    if let Some(new_value) = f(key.borrow(), value) {
+                        Some((key, new_value))
+                    } else {
+                        spare_key = Some(KeyOrRef::Owned(key));
+                        None
+                    }
+                });
+
+            if let Some(key) = spare_key {
+                EntryRef::Vacant(VacantEntryRef {
+                    hash: self.hash,
+                    key,
+                    table: self.table,
+                })
+            } else {
+                EntryRef::Occupied(self)
+            }
+        }
+    }
+}
+
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> {
+    /// Gets a reference to the key that would be used when inserting a value
+    /// through the `VacantEntryRef`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// let key: &str = "poneyland";
+    /// assert_eq!(map.entry_ref(key).key(), "poneyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &Q
+    where
+        K: Borrow<Q>,
+    {
+        self.key.as_ref()
+    }
+
+    /// Take ownership of the key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{EntryRef, HashMap};
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// let key: &str = "poneyland";
+    ///
+    /// match map.entry_ref(key) {
+    ///     EntryRef::Occupied(_) => panic!(),
+    ///     EntryRef::Vacant(v) => assert_eq!(v.into_key(), "poneyland".to_owned()),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_key(self) -> K
+    where
+        K: From<&'b Q>,
+    {
+        self.key.into_owned()
+    }
+
+    /// Sets the value of the entry with the VacantEntryRef's key,
+    /// and returns a mutable reference to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::EntryRef;
+    ///
+    /// let mut map: HashMap<String, u32> = HashMap::new();
+    /// let key: &str = "poneyland";
+    ///
+    /// if let EntryRef::Vacant(o) = map.entry_ref(key) {
+    ///     o.insert(37);
+    /// }
+    /// assert_eq!(map["poneyland"], 37);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, value: V) -> &'a mut V
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        let table = &mut self.table.table;
+        let entry = table.insert_entry(
+            self.hash,
+            (self.key.into_owned(), value),
+            make_hasher::<_, V, S>(&self.table.hash_builder),
+        );
+        &mut entry.1
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn insert_entry(self, value: V) -> OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
+    where
+        K: Hash + From<&'b Q>,
+        S: BuildHasher,
+    {
+        let elem = self.table.table.insert(
+            self.hash,
+            (self.key.into_owned(), value),
+            make_hasher::<_, V, S>(&self.table.hash_builder),
+        );
+        OccupiedEntryRef {
+            hash: self.hash,
+            key: None,
+            elem,
+            table: self.table,
+        }
+    }
+}
+
+impl<K, V, S, A> FromIterator<(K, V)> for HashMap<K, V, S, A>
+where
+    K: Eq + Hash,
+    S: BuildHasher + Default,
+    A: Default + Allocator + Clone,
+{
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+        let iter = iter.into_iter();
+        let mut map =
+            Self::with_capacity_and_hasher_in(iter.size_hint().0, S::default(), A::default());
+        iter.for_each(|(k, v)| {
+            map.insert(k, v);
+        });
+        map
+    }
+}
+
+/// Inserts all new key-value pairs from the iterator; for keys that already
+/// exist, the value is replaced with the new one from the iterator.
+impl<K, V, S, A> Extend<(K, V)> for HashMap<K, V, S, A>
+where
+    K: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    /// Inserts all new key-value pairs from the iterator into the existing `HashMap`.
+    /// For keys that already exist, the value is replaced with the new one from the iterator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1, 100);
+    ///
+    /// let some_iter = [(1, 1), (2, 2)].into_iter();
+    /// map.extend(some_iter);
+    /// // Values of existing keys are replaced with the values from the iterator,
+    /// // so map.get(&1) no longer returns Some(&100).
+    /// assert_eq!(map.get(&1), Some(&1));
+    ///
+    /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)];
+    /// map.extend(some_vec);
+    ///
+    /// let some_arr = [(5, 5), (6, 6)];
+    /// map.extend(some_arr);
+    /// let old_map_len = map.len();
+    ///
+    /// // You can also extend from another HashMap
+    /// let mut new_map = HashMap::new();
+    /// new_map.extend(map);
+    /// assert_eq!(new_map.len(), old_map_len);
+    ///
+    /// let mut vec: Vec<_> = new_map.into_iter().collect();
+    /// // The `IntoIter` iterator produces items in arbitrary order, so the
+    /// // items must be sorted to test them against a sorted array.
+    /// vec.sort_unstable();
+    /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+        // Keys may already be present, or may appear multiple times in the iterator.
+        // Reserve the entire hint lower bound if the map is empty.
+        // Otherwise reserve half the hint (rounded up), so the map
+        // will only resize twice in the worst case.
+        let iter = iter.into_iter();
+        let reserve = if self.is_empty() {
+            iter.size_hint().0
+        } else {
+            (iter.size_hint().0 + 1) / 2
+        };
+        self.reserve(reserve);
+        iter.for_each(move |(k, v)| {
+            self.insert(k, v);
+        });
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_one(&mut self, (k, v): (K, V)) {
+        self.insert(k, v);
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_reserve(&mut self, additional: usize) {
+        // Keys may already be present, or may appear multiple times in the iterator.
+        // Reserve the entire hint lower bound if the map is empty.
+        // Otherwise reserve half the hint (rounded up), so the map
+        // will only resize twice in the worst case.
+        let reserve = if self.is_empty() {
+            additional
+        } else {
+            (additional + 1) / 2
+        };
+        self.reserve(reserve);
+    }
+}
+
+/// Inserts all new key-value pairs from the iterator; for keys that already
+/// exist, the value is replaced with the new one from the iterator.
+impl<'a, K, V, S, A> Extend<(&'a K, &'a V)> for HashMap<K, V, S, A>
+where
+    K: Eq + Hash + Copy,
+    V: Copy,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    /// Inserts all new key-value pairs from the iterator into the existing `HashMap`.
+    /// For keys that already exist, the value is replaced with the new one from the iterator.
+    /// The keys and values must implement the [`Copy`] trait.
+    ///
+    /// [`Copy`]: https://doc.rust-lang.org/core/marker/trait.Copy.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1, 100);
+    ///
+    /// let arr = [(1, 1), (2, 2)];
+    /// let some_iter = arr.iter().map(|&(k, v)| (k, v));
+    /// map.extend(some_iter);
+    /// // Values of existing keys are replaced with the values from the iterator,
+    /// // so map.get(&1) no longer returns Some(&100).
+    /// assert_eq!(map.get(&1), Some(&1));
+    ///
+    /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)];
+    /// map.extend(some_vec.iter().map(|&(k, v)| (k, v)));
+    ///
+    /// let some_arr = [(5, 5), (6, 6)];
+    /// map.extend(some_arr.iter().map(|&(k, v)| (k, v)));
+    ///
+    /// // You can also extend from another HashMap
+    /// let mut new_map = HashMap::new();
+    /// new_map.extend(&map);
+    /// assert_eq!(new_map, map);
+    ///
+    /// let mut vec: Vec<_> = new_map.into_iter().collect();
+    /// // The `IntoIter` iterator produces items in arbitrary order, so the
+    /// // items must be sorted to test them against a sorted array.
+    /// vec.sort_unstable();
+    /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
+        self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_one(&mut self, (k, v): (&'a K, &'a V)) {
+        self.insert(*k, *v);
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_reserve(&mut self, additional: usize) {
+        Extend::<(K, V)>::extend_reserve(self, additional);
+    }
+}
+
+/// Inserts all new key-value pairs from the iterator; for keys that already
+/// exist, the value is replaced with the new one from the iterator.
+impl<'a, K, V, S, A> Extend<&'a (K, V)> for HashMap<K, V, S, A>
+where
+    K: Eq + Hash + Copy,
+    V: Copy,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    /// Inserts all new key-value pairs from the iterator into the existing `HashMap`.
+    /// For keys that already exist, the value is replaced with the new one from the iterator.
+    /// The keys and values must implement the [`Copy`] trait.
+    ///
+    /// [`Copy`]: https://doc.rust-lang.org/core/marker/trait.Copy.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::HashMap;
+    ///
+    /// let mut map = HashMap::new();
+    /// map.insert(1, 100);
+    ///
+    /// let arr = [(1, 1), (2, 2)];
+    /// let some_iter = arr.iter();
+    /// map.extend(some_iter);
+    /// // Values of existing keys are replaced with the values from the iterator,
+    /// // so map.get(&1) no longer returns Some(&100).
+    /// assert_eq!(map.get(&1), Some(&1));
+    ///
+    /// let some_vec: Vec<_> = vec![(3, 3), (4, 4)];
+    /// map.extend(&some_vec);
+    ///
+    /// let some_arr = [(5, 5), (6, 6)];
+    /// map.extend(&some_arr);
+    ///
+    /// let mut vec: Vec<_> = map.into_iter().collect();
+    /// // The `IntoIter` iterator produces items in arbitrary order, so the
+    /// // items must be sorted to test them against a sorted array.
+ /// vec.sort_unstable(); + /// assert_eq!(vec, [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + fn extend>(&mut self, iter: T) { + self.extend(iter.into_iter().map(|&(key, value)| (key, value))); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_one(&mut self, &(k, v): &'a (K, V)) { + self.insert(k, v); + } + + #[inline] + #[cfg(feature = "nightly")] + fn extend_reserve(&mut self, additional: usize) { + Extend::<(K, V)>::extend_reserve(self, additional); + } +} + +#[allow(dead_code)] +fn assert_covariance() { + fn map_key<'new>(v: HashMap<&'static str, u8>) -> HashMap<&'new str, u8> { + v + } + fn map_val<'new>(v: HashMap) -> HashMap { + v + } + fn iter_key<'a, 'new>(v: Iter<'a, &'static str, u8>) -> Iter<'a, &'new str, u8> { + v + } + fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { + v + } + fn into_iter_key<'new, A: Allocator + Clone>( + v: IntoIter<&'static str, u8, A>, + ) -> IntoIter<&'new str, u8, A> { + v + } + fn into_iter_val<'new, A: Allocator + Clone>( + v: IntoIter, + ) -> IntoIter { + v + } + fn keys_key<'a, 'new>(v: Keys<'a, &'static str, u8>) -> Keys<'a, &'new str, u8> { + v + } + fn keys_val<'a, 'new>(v: Keys<'a, u8, &'static str>) -> Keys<'a, u8, &'new str> { + v + } + fn values_key<'a, 'new>(v: Values<'a, &'static str, u8>) -> Values<'a, &'new str, u8> { + v + } + fn values_val<'a, 'new>(v: Values<'a, u8, &'static str>) -> Values<'a, u8, &'new str> { + v + } + fn drain<'new>( + d: Drain<'static, &'static str, &'static str>, + ) -> Drain<'new, &'new str, &'new str> { + d + } +} + +#[cfg(test)] +mod test_map { + use super::DefaultHashBuilder; + use super::Entry::{Occupied, Vacant}; + use super::EntryRef; + use super::{HashMap, RawEntryMut}; + use rand::{rngs::SmallRng, Rng, SeedableRng}; + use std::borrow::ToOwned; + use std::cell::RefCell; + use std::usize; + use std::vec::Vec; + + #[test] + fn test_zero_capacities() { + type HM = HashMap; + + let m = HM::new(); + assert_eq!(m.capacity(), 0); + + let m = HM::default(); + assert_eq!(m.capacity(), 0); + + let m = HM::with_hasher(DefaultHashBuilder::default()); + assert_eq!(m.capacity(), 0); + + let m = HM::with_capacity(0); + assert_eq!(m.capacity(), 0); + + let m = HM::with_capacity_and_hasher(0, DefaultHashBuilder::default()); + assert_eq!(m.capacity(), 0); + + let mut m = HM::new(); + m.insert(1, 1); + m.insert(2, 2); + m.remove(&1); + m.remove(&2); + m.shrink_to_fit(); + assert_eq!(m.capacity(), 0); + + let mut m = HM::new(); + m.reserve(0); + assert_eq!(m.capacity(), 0); + } + + #[test] + fn test_create_capacity_zero() { + let mut m = HashMap::with_capacity(0); + + assert!(m.insert(1, 1).is_none()); + + assert!(m.contains_key(&1)); + assert!(!m.contains_key(&0)); + } + + #[test] + fn test_insert() { + let mut m = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&2).unwrap(), 4); + } + + #[test] + fn test_clone() { + let mut m = HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + #[allow(clippy::redundant_clone)] + let m2 = m.clone(); + assert_eq!(*m2.get(&1).unwrap(), 2); + assert_eq!(*m2.get(&2).unwrap(), 4); + assert_eq!(m2.len(), 2); + } + + #[test] + fn test_clone_from() { + let mut m = HashMap::new(); + let mut m2 = 
HashMap::new(); + assert_eq!(m.len(), 0); + assert!(m.insert(1, 2).is_none()); + assert_eq!(m.len(), 1); + assert!(m.insert(2, 4).is_none()); + assert_eq!(m.len(), 2); + m2.clone_from(&m); + assert_eq!(*m2.get(&1).unwrap(), 2); + assert_eq!(*m2.get(&2).unwrap(), 4); + assert_eq!(m2.len(), 2); + } + + thread_local! { static DROP_VECTOR: RefCell> = RefCell::new(Vec::new()) } + + #[derive(Hash, PartialEq, Eq)] + struct Droppable { + k: usize, + } + + impl Droppable { + fn new(k: usize) -> Droppable { + DROP_VECTOR.with(|slot| { + slot.borrow_mut()[k] += 1; + }); + + Droppable { k } + } + } + + impl Drop for Droppable { + fn drop(&mut self) { + DROP_VECTOR.with(|slot| { + slot.borrow_mut()[self.k] -= 1; + }); + } + } + + impl Clone for Droppable { + fn clone(&self) -> Self { + Droppable::new(self.k) + } + } + + #[test] + fn test_drops() { + DROP_VECTOR.with(|slot| { + *slot.borrow_mut() = vec![0; 200]; + }); + + { + let mut m = HashMap::new(); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + + for i in 0..100 { + let d1 = Droppable::new(i); + let d2 = Droppable::new(i + 100); + m.insert(d1, d2); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + for i in 0..50 { + let k = Droppable::new(i); + let v = m.remove(&k); + + assert!(v.is_some()); + + DROP_VECTOR.with(|v| { + assert_eq!(v.borrow()[i], 1); + assert_eq!(v.borrow()[i + 100], 1); + }); + } + + DROP_VECTOR.with(|v| { + for i in 0..50 { + assert_eq!(v.borrow()[i], 0); + assert_eq!(v.borrow()[i + 100], 0); + } + + for i in 50..100 { + assert_eq!(v.borrow()[i], 1); + assert_eq!(v.borrow()[i + 100], 1); + } + }); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + } + + #[test] + fn test_into_iter_drops() { + DROP_VECTOR.with(|v| { + *v.borrow_mut() = vec![0; 200]; + }); + + let hm = { + let mut hm = HashMap::new(); + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 0); + } + }); + + for i in 0..100 { + let d1 = Droppable::new(i); + let d2 = Droppable::new(i + 100); + hm.insert(d1, d2); + } + + DROP_VECTOR.with(|v| { + for i in 0..200 { + assert_eq!(v.borrow()[i], 1); + } + }); + + hm + }; + + // By the way, ensure that cloning doesn't screw up the dropping. 
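+        // Cloning calls Droppable::new for every key and value, so each counter
+        // briefly reaches 2; dropping the clone must bring them all back to 1.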
+        drop(hm.clone());
+
+        {
+            let mut half = hm.into_iter().take(50);
+
+            DROP_VECTOR.with(|v| {
+                for i in 0..200 {
+                    assert_eq!(v.borrow()[i], 1);
+                }
+            });
+
+            #[allow(clippy::let_underscore_drop)] // kind-of a false positive
+            for _ in half.by_ref() {}
+
+            DROP_VECTOR.with(|v| {
+                let nk = (0..100).filter(|&i| v.borrow()[i] == 1).count();
+
+                let nv = (0..100).filter(|&i| v.borrow()[i + 100] == 1).count();
+
+                assert_eq!(nk, 50);
+                assert_eq!(nv, 50);
+            });
+        };
+
+        DROP_VECTOR.with(|v| {
+            for i in 0..200 {
+                assert_eq!(v.borrow()[i], 0);
+            }
+        });
+    }
+
+    #[test]
+    fn test_empty_remove() {
+        let mut m: HashMap<i32, bool> = HashMap::new();
+        assert_eq!(m.remove(&0), None);
+    }
+
+    #[test]
+    fn test_empty_entry() {
+        let mut m: HashMap<i32, bool> = HashMap::new();
+        match m.entry(0) {
+            Occupied(_) => panic!(),
+            Vacant(_) => {}
+        }
+        assert!(*m.entry(0).or_insert(true));
+        assert_eq!(m.len(), 1);
+    }
+
+    #[test]
+    fn test_empty_entry_ref() {
+        let mut m: HashMap<std::string::String, bool> = HashMap::new();
+        match m.entry_ref("poneyland") {
+            EntryRef::Occupied(_) => panic!(),
+            EntryRef::Vacant(_) => {}
+        }
+        assert!(*m.entry_ref("poneyland").or_insert(true));
+        assert_eq!(m.len(), 1);
+    }
+
+    #[test]
+    fn test_empty_iter() {
+        let mut m: HashMap<i32, bool> = HashMap::new();
+        assert_eq!(m.drain().next(), None);
+        assert_eq!(m.keys().next(), None);
+        assert_eq!(m.values().next(), None);
+        assert_eq!(m.values_mut().next(), None);
+        assert_eq!(m.iter().next(), None);
+        assert_eq!(m.iter_mut().next(), None);
+        assert_eq!(m.len(), 0);
+        assert!(m.is_empty());
+        assert_eq!(m.into_iter().next(), None);
+    }
+
+    #[test]
+    #[cfg_attr(miri, ignore)] // FIXME: takes too long
+    fn test_lots_of_insertions() {
+        let mut m = HashMap::new();
+
+        // Try this a few times to make sure we never screw up the hashmap's
+        // internal state.
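+        // Each round inserts 1..=1000, checking after every insert that exactly
+        // the keys inserted so far are present, then removes them again both
+        // front-to-back and back-to-front with the same checks.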
+ for _ in 0..10 { + assert!(m.is_empty()); + + for i in 1..1001 { + assert!(m.insert(i, i).is_none()); + + for j in 1..=i { + let r = m.get(&j); + assert_eq!(r, Some(&j)); + } + + for j in i + 1..1001 { + let r = m.get(&j); + assert_eq!(r, None); + } + } + + for i in 1001..2001 { + assert!(!m.contains_key(&i)); + } + + // remove forwards + for i in 1..1001 { + assert!(m.remove(&i).is_some()); + + for j in 1..=i { + assert!(!m.contains_key(&j)); + } + + for j in i + 1..1001 { + assert!(m.contains_key(&j)); + } + } + + for i in 1..1001 { + assert!(!m.contains_key(&i)); + } + + for i in 1..1001 { + assert!(m.insert(i, i).is_none()); + } + + // remove backwards + for i in (1..1001).rev() { + assert!(m.remove(&i).is_some()); + + for j in i..1001 { + assert!(!m.contains_key(&j)); + } + + for j in 1..i { + assert!(m.contains_key(&j)); + } + } + } + } + + #[test] + fn test_find_mut() { + let mut m = HashMap::new(); + assert!(m.insert(1, 12).is_none()); + assert!(m.insert(2, 8).is_none()); + assert!(m.insert(5, 14).is_none()); + let new = 100; + match m.get_mut(&5) { + None => panic!(), + Some(x) => *x = new, + } + assert_eq!(m.get(&5), Some(&new)); + } + + #[test] + fn test_insert_overwrite() { + let mut m = HashMap::new(); + assert!(m.insert(1, 2).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert!(m.insert(1, 3).is_some()); + assert_eq!(*m.get(&1).unwrap(), 3); + } + + #[test] + fn test_insert_conflicts() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert!(m.insert(5, 3).is_none()); + assert!(m.insert(9, 4).is_none()); + assert_eq!(*m.get(&9).unwrap(), 4); + assert_eq!(*m.get(&5).unwrap(), 3); + assert_eq!(*m.get(&1).unwrap(), 2); + } + + #[test] + fn test_conflict_remove() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert!(m.insert(5, 3).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&5).unwrap(), 3); + assert!(m.insert(9, 4).is_none()); + assert_eq!(*m.get(&1).unwrap(), 2); + assert_eq!(*m.get(&5).unwrap(), 3); + assert_eq!(*m.get(&9).unwrap(), 4); + assert!(m.remove(&1).is_some()); + assert_eq!(*m.get(&9).unwrap(), 4); + assert_eq!(*m.get(&5).unwrap(), 3); + } + + #[test] + fn test_insert_unique_unchecked() { + let mut map = HashMap::new(); + let (k1, v1) = map.insert_unique_unchecked(10, 11); + assert_eq!((&10, &mut 11), (k1, v1)); + let (k2, v2) = map.insert_unique_unchecked(20, 21); + assert_eq!((&20, &mut 21), (k2, v2)); + assert_eq!(Some(&11), map.get(&10)); + assert_eq!(Some(&21), map.get(&20)); + assert_eq!(None, map.get(&30)); + } + + #[test] + fn test_is_empty() { + let mut m = HashMap::with_capacity(4); + assert!(m.insert(1, 2).is_none()); + assert!(!m.is_empty()); + assert!(m.remove(&1).is_some()); + assert!(m.is_empty()); + } + + #[test] + fn test_remove() { + let mut m = HashMap::new(); + m.insert(1, 2); + assert_eq!(m.remove(&1), Some(2)); + assert_eq!(m.remove(&1), None); + } + + #[test] + fn test_remove_entry() { + let mut m = HashMap::new(); + m.insert(1, 2); + assert_eq!(m.remove_entry(&1), Some((1, 2))); + assert_eq!(m.remove(&1), None); + } + + #[test] + fn test_iterate() { + let mut m = HashMap::with_capacity(4); + for i in 0..32 { + assert!(m.insert(i, i * 2).is_none()); + } + assert_eq!(m.len(), 32); + + let mut observed: u32 = 0; + + for (k, v) in &m { + assert_eq!(*v, *k * 2); + observed |= 1 << *k; + } + assert_eq!(observed, 0xFFFF_FFFF); + } + + #[test] + fn test_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + 
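+        // Iteration order is unspecified, so assert membership rather than order.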
let map: HashMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.keys().copied().collect(); + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_values_mut() { + let vec = vec![(1, 1), (2, 2), (3, 3)]; + let mut map: HashMap<_, _> = vec.into_iter().collect(); + for value in map.values_mut() { + *value *= 2; + } + let values: Vec<_> = map.values().copied().collect(); + assert_eq!(values.len(), 3); + assert!(values.contains(&2)); + assert!(values.contains(&4)); + assert!(values.contains(&6)); + } + + #[test] + fn test_into_keys() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let keys: Vec<_> = map.into_keys().collect(); + + assert_eq!(keys.len(), 3); + assert!(keys.contains(&1)); + assert!(keys.contains(&2)); + assert!(keys.contains(&3)); + } + + #[test] + fn test_into_values() { + let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; + let map: HashMap<_, _> = vec.into_iter().collect(); + let values: Vec<_> = map.into_values().collect(); + + assert_eq!(values.len(), 3); + assert!(values.contains(&'a')); + assert!(values.contains(&'b')); + assert!(values.contains(&'c')); + } + + #[test] + fn test_find() { + let mut m = HashMap::new(); + assert!(m.get(&1).is_none()); + m.insert(1, 2); + match m.get(&1) { + None => panic!(), + Some(v) => assert_eq!(*v, 2), + } + } + + #[test] + fn test_eq() { + let mut m1 = HashMap::new(); + m1.insert(1, 2); + m1.insert(2, 3); + m1.insert(3, 4); + + let mut m2 = HashMap::new(); + m2.insert(1, 2); + m2.insert(2, 3); + + assert!(m1 != m2); + + m2.insert(3, 4); + + assert_eq!(m1, m2); + } + + #[test] + fn test_show() { + let mut map = HashMap::new(); + let empty: HashMap = HashMap::new(); + + map.insert(1, 2); + map.insert(3, 4); + + let map_str = format!("{:?}", map); + + assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}"); + assert_eq!(format!("{:?}", empty), "{}"); + } + + #[test] + fn test_expand() { + let mut m = HashMap::new(); + + assert_eq!(m.len(), 0); + assert!(m.is_empty()); + + let mut i = 0; + let old_raw_cap = m.raw_capacity(); + while old_raw_cap == m.raw_capacity() { + m.insert(i, i); + i += 1; + } + + assert_eq!(m.len(), i); + assert!(!m.is_empty()); + } + + #[test] + fn test_behavior_resize_policy() { + let mut m = HashMap::new(); + + assert_eq!(m.len(), 0); + assert_eq!(m.raw_capacity(), 1); + assert!(m.is_empty()); + + m.insert(0, 0); + m.remove(&0); + assert!(m.is_empty()); + let initial_raw_cap = m.raw_capacity(); + m.reserve(initial_raw_cap); + let raw_cap = m.raw_capacity(); + + assert_eq!(raw_cap, initial_raw_cap * 2); + + let mut i = 0; + for _ in 0..raw_cap * 3 / 4 { + m.insert(i, i); + i += 1; + } + // three quarters full + + assert_eq!(m.len(), i); + assert_eq!(m.raw_capacity(), raw_cap); + + for _ in 0..raw_cap / 4 { + m.insert(i, i); + i += 1; + } + // half full + + let new_raw_cap = m.raw_capacity(); + assert_eq!(new_raw_cap, raw_cap * 2); + + for _ in 0..raw_cap / 2 - 1 { + i -= 1; + m.remove(&i); + assert_eq!(m.raw_capacity(), new_raw_cap); + } + // A little more than one quarter full. 
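+        // Shrinking now should halve the raw capacity back to `raw_cap`,
+        // since the remaining items fit in the smaller table.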
+ m.shrink_to_fit(); + assert_eq!(m.raw_capacity(), raw_cap); + // again, a little more than half full + for _ in 0..raw_cap / 2 { + i -= 1; + m.remove(&i); + } + m.shrink_to_fit(); + + assert_eq!(m.len(), i); + assert!(!m.is_empty()); + assert_eq!(m.raw_capacity(), initial_raw_cap); + } + + #[test] + fn test_reserve_shrink_to_fit() { + let mut m = HashMap::new(); + m.insert(0, 0); + m.remove(&0); + assert!(m.capacity() >= m.len()); + for i in 0..128 { + m.insert(i, i); + } + m.reserve(256); + + let usable_cap = m.capacity(); + for i in 128..(128 + 256) { + m.insert(i, i); + assert_eq!(m.capacity(), usable_cap); + } + + for i in 100..(128 + 256) { + assert_eq!(m.remove(&i), Some(i)); + } + m.shrink_to_fit(); + + assert_eq!(m.len(), 100); + assert!(!m.is_empty()); + assert!(m.capacity() >= m.len()); + + for i in 0..100 { + assert_eq!(m.remove(&i), Some(i)); + } + m.shrink_to_fit(); + m.insert(0, 0); + + assert_eq!(m.len(), 1); + assert!(m.capacity() >= m.len()); + assert_eq!(m.remove(&0), Some(0)); + } + + #[test] + fn test_from_iter() { + let xs = [(1, 1), (2, 2), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().copied().collect(); + + for &(k, v) in &xs { + assert_eq!(map.get(&k), Some(&v)); + } + + assert_eq!(map.iter().len(), xs.len() - 1); + } + + #[test] + fn test_size_hint() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.size_hint(), (3, Some(3))); + } + + #[test] + fn test_iter_len() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.len(), 3); + } + + #[test] + fn test_mut_size_hint() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter_mut(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.size_hint(), (3, Some(3))); + } + + #[test] + fn test_iter_mut_len() { + let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + let mut iter = map.iter_mut(); + + for _ in iter.by_ref().take(3) {} + + assert_eq!(iter.len(), 3); + } + + #[test] + fn test_index() { + let mut map = HashMap::new(); + + map.insert(1, 2); + map.insert(2, 1); + map.insert(3, 4); + + assert_eq!(map[&2], 1); + } + + #[test] + #[should_panic] + fn test_index_nonexistent() { + let mut map = HashMap::new(); + + map.insert(1, 2); + map.insert(2, 1); + map.insert(3, 4); + + #[allow(clippy::no_effect)] // false positive lint + map[&4]; + } + + #[test] + fn test_entry() { + let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + // Existing key (insert) + match map.entry(1) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + assert_eq!(map.get(&1).unwrap(), &100); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.entry(2) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + assert_eq!(map.get(&2).unwrap(), &200); + assert_eq!(map.len(), 6); + + // Existing key (take) + match map.entry(3) { + Vacant(_) => unreachable!(), + Occupied(view) => { + 
assert_eq!(view.remove(), 30); + } + } + assert_eq!(map.get(&3), None); + assert_eq!(map.len(), 5); + + // Inexistent key (insert) + match map.entry(10) { + Occupied(_) => unreachable!(), + Vacant(view) => { + assert_eq!(*view.insert(1000), 1000); + } + } + assert_eq!(map.get(&10).unwrap(), &1000); + assert_eq!(map.len(), 6); + } + + #[test] + fn test_entry_ref() { + let xs = [ + ("One".to_owned(), 10), + ("Two".to_owned(), 20), + ("Three".to_owned(), 30), + ("Four".to_owned(), 40), + ("Five".to_owned(), 50), + ("Six".to_owned(), 60), + ]; + + let mut map: HashMap<_, _> = xs.iter().cloned().collect(); + + // Existing key (insert) + match map.entry_ref("One") { + EntryRef::Vacant(_) => unreachable!(), + EntryRef::Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + assert_eq!(map.get("One").unwrap(), &100); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.entry_ref("Two") { + EntryRef::Vacant(_) => unreachable!(), + EntryRef::Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + assert_eq!(map.get("Two").unwrap(), &200); + assert_eq!(map.len(), 6); + + // Existing key (take) + match map.entry_ref("Three") { + EntryRef::Vacant(_) => unreachable!(), + EntryRef::Occupied(view) => { + assert_eq!(view.remove(), 30); + } + } + assert_eq!(map.get("Three"), None); + assert_eq!(map.len(), 5); + + // Inexistent key (insert) + match map.entry_ref("Ten") { + EntryRef::Occupied(_) => unreachable!(), + EntryRef::Vacant(view) => { + assert_eq!(*view.insert(1000), 1000); + } + } + assert_eq!(map.get("Ten").unwrap(), &1000); + assert_eq!(map.len(), 6); + } + + #[test] + fn test_entry_take_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let x = rng.gen_range(-10..10); + m.insert(x, ()); + } + + for _ in 0..1000 { + let x = rng.gen_range(-10..10); + match m.entry(x) { + Vacant(_) => {} + Occupied(e) => { + e.remove(); + } + } + + check(&m); + } + } + + #[test] + fn test_entry_ref_take_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. 
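+        // Keys are single random characters from 'a'..='z', so later lookups
+        // frequently hit existing entries and exercise the occupied path.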
+ for _ in 0..50 { + let mut x = std::string::String::with_capacity(1); + x.push(rng.gen_range('a'..='z')); + m.insert(x, ()); + } + + for _ in 0..1000 { + let mut x = std::string::String::with_capacity(1); + x.push(rng.gen_range('a'..='z')); + match m.entry_ref(x.as_str()) { + EntryRef::Vacant(_) => {} + EntryRef::Occupied(e) => { + e.remove(); + } + } + + check(&m); + } + } + + #[test] + fn test_extend_ref_k_ref_v() { + let mut a = HashMap::new(); + a.insert(1, "one"); + let mut b = HashMap::new(); + b.insert(2, "two"); + b.insert(3, "three"); + + a.extend(&b); + + assert_eq!(a.len(), 3); + assert_eq!(a[&1], "one"); + assert_eq!(a[&2], "two"); + assert_eq!(a[&3], "three"); + } + + #[test] + fn test_extend_ref_kv_tuple() { + use std::ops::AddAssign; + let mut a = HashMap::new(); + a.insert(0, 0); + + fn create_arr + Copy, const N: usize>(start: T, step: T) -> [(T, T); N] { + let mut outs: [(T, T); N] = [(start, start); N]; + let mut element = step; + outs.iter_mut().skip(1).for_each(|(k, v)| { + *k += element; + *v += element; + element += step; + }); + outs + } + + let for_iter: Vec<_> = (0..100).map(|i| (i, i)).collect(); + let iter = for_iter.iter(); + let vec: Vec<_> = (100..200).map(|i| (i, i)).collect(); + a.extend(iter); + a.extend(&vec); + a.extend(&create_arr::(200, 1)); + + assert_eq!(a.len(), 300); + + for item in 0..300 { + assert_eq!(a[&item], item); + } + } + + #[test] + fn test_capacity_not_less_than_len() { + let mut a = HashMap::new(); + let mut item = 0; + + for _ in 0..116 { + a.insert(item, 0); + item += 1; + } + + assert!(a.capacity() > a.len()); + + let free = a.capacity() - a.len(); + for _ in 0..free { + a.insert(item, 0); + item += 1; + } + + assert_eq!(a.len(), a.capacity()); + + // Insert at capacity should cause allocation. 
+ a.insert(item, 0); + assert!(a.capacity() > a.len()); + } + + #[test] + fn test_occupied_entry_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + assert!(a.is_empty()); + a.insert(key, value); + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + + match a.entry(key) { + Vacant(_) => panic!(), + Occupied(e) => assert_eq!(key, *e.key()), + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_occupied_entry_ref_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + assert!(a.is_empty()); + a.insert(key.to_owned(), value); + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + + match a.entry_ref(key) { + EntryRef::Vacant(_) => panic!(), + EntryRef::Occupied(e) => assert_eq!(key, e.key()), + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_vacant_entry_key() { + let mut a = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + + assert!(a.is_empty()); + match a.entry(key) { + Occupied(_) => panic!(), + Vacant(e) => { + assert_eq!(key, *e.key()); + e.insert(value); + } + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_vacant_entry_ref_key() { + let mut a: HashMap = HashMap::new(); + let key = "hello there"; + let value = "value goes here"; + + assert!(a.is_empty()); + match a.entry_ref(key) { + EntryRef::Occupied(_) => panic!(), + EntryRef::Vacant(e) => { + assert_eq!(key, e.key()); + e.insert(value); + } + } + assert_eq!(a.len(), 1); + assert_eq!(a[key], value); + } + + #[test] + fn test_occupied_entry_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry(key).insert(value).replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.entry(key) { + Occupied(e) => e.replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }), + Vacant(_) => panic!(), + }; + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_occupied_entry_ref_replace_entry_with() { + let mut a: HashMap = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry_ref(key).insert(value).replace_entry_with(|k, v| { + assert_eq!(k, key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + EntryRef::Occupied(e) => { + assert_eq!(e.key(), key); + assert_eq!(e.get(), &new_value); + } + EntryRef::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.entry_ref(key) { + EntryRef::Occupied(e) => e.replace_entry_with(|k, v| { + assert_eq!(k, key); + assert_eq!(v, new_value); + None + }), + EntryRef::Vacant(_) => panic!(), + }; + + match entry { + EntryRef::Vacant(e) => assert_eq!(e.key(), key), + EntryRef::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_entry_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a 
new value"; + + let entry = a.entry(key).and_replace_entry_with(|_, _| panic!()); + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + a.insert(key, value); + + let entry = a.entry(key).and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a.entry(key).and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }); + + match entry { + Vacant(e) => assert_eq!(e.key(), &key), + Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_entry_ref_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a.entry_ref(key).and_replace_entry_with(|_, _| panic!()); + + match entry { + EntryRef::Vacant(e) => assert_eq!(e.key(), key), + EntryRef::Occupied(_) => panic!(), + } + + a.insert(key.to_owned(), value); + + let entry = a.entry_ref(key).and_replace_entry_with(|k, v| { + assert_eq!(k, key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + EntryRef::Occupied(e) => { + assert_eq!(e.key(), key); + assert_eq!(e.get(), &new_value); + } + EntryRef::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a.entry_ref(key).and_replace_entry_with(|k, v| { + assert_eq!(k, key); + assert_eq!(v, new_value); + None + }); + + match entry { + EntryRef::Vacant(e) => assert_eq!(e.key(), key), + EntryRef::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_raw_occupied_entry_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a + .raw_entry_mut() + .from_key(&key) + .insert(key, value) + .replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + RawEntryMut::Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + RawEntryMut::Vacant(_) => panic!(), + } + + assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = match a.raw_entry_mut().from_key(&key) { + RawEntryMut::Occupied(e) => e.replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }), + RawEntryMut::Vacant(_) => panic!(), + }; + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_raw_entry_and_replace_entry_with() { + let mut a = HashMap::new(); + + let key = "a key"; + let value = "an initial value"; + let new_value = "a new value"; + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|_, _| panic!()); + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + a.insert(key, value); + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, value); + Some(new_value) + }); + + match entry { + RawEntryMut::Occupied(e) => { + assert_eq!(e.key(), &key); + assert_eq!(e.get(), &new_value); + } + RawEntryMut::Vacant(_) => panic!(), + } + + 
assert_eq!(a[key], new_value); + assert_eq!(a.len(), 1); + + let entry = a + .raw_entry_mut() + .from_key(&key) + .and_replace_entry_with(|k, v| { + assert_eq!(k, &key); + assert_eq!(v, new_value); + None + }); + + match entry { + RawEntryMut::Vacant(_) => {} + RawEntryMut::Occupied(_) => panic!(), + } + + assert!(!a.contains_key(key)); + assert_eq!(a.len(), 0); + } + + #[test] + fn test_replace_entry_with_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let x = rng.gen_range(-10..10); + m.insert(x, ()); + } + + for _ in 0..1000 { + let x = rng.gen_range(-10..10); + m.entry(x).and_replace_entry_with(|_, _| None); + check(&m); + } + } + + #[test] + fn test_replace_entry_ref_with_doesnt_corrupt() { + #![allow(deprecated)] //rand + // Test for #19292 + fn check(m: &HashMap) { + for k in m.keys() { + assert!(m.contains_key(k), "{} is in keys() but not in the map?", k); + } + } + + let mut m = HashMap::new(); + + let mut rng = { + let seed = u64::from_le_bytes(*b"testseed"); + SmallRng::seed_from_u64(seed) + }; + + // Populate the map with some items. + for _ in 0..50 { + let mut x = std::string::String::with_capacity(1); + x.push(rng.gen_range('a'..='z')); + m.insert(x, ()); + } + + for _ in 0..1000 { + let mut x = std::string::String::with_capacity(1); + x.push(rng.gen_range('a'..='z')); + m.entry_ref(x.as_str()).and_replace_entry_with(|_, _| None); + check(&m); + } + } + + #[test] + fn test_retain() { + let mut map: HashMap = (0..100).map(|x| (x, x * 10)).collect(); + + map.retain(|&k, _| k % 2 == 0); + assert_eq!(map.len(), 50); + assert_eq!(map[&2], 20); + assert_eq!(map[&4], 40); + assert_eq!(map[&6], 60); + } + + #[test] + fn test_drain_filter() { + { + let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); + let drained = map.drain_filter(|&k, _| k % 2 == 0); + let mut out = drained.collect::>(); + out.sort_unstable(); + assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out); + assert_eq!(map.len(), 4); + } + { + let mut map: HashMap = (0..8).map(|x| (x, x * 10)).collect(); + drop(map.drain_filter(|&k, _| k % 2 == 0)); + assert_eq!(map.len(), 4); + } + } + + #[test] + #[cfg_attr(miri, ignore)] // FIXME: no OOM signalling (https://github.com/rust-lang/miri/issues/613) + fn test_try_reserve() { + use crate::TryReserveError::{AllocError, CapacityOverflow}; + + const MAX_USIZE: usize = usize::MAX; + + let mut empty_bytes: HashMap = HashMap::new(); + + if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) { + } else { + panic!("usize::MAX should trigger an overflow!"); + } + + if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) { + } else { + // This may succeed if there is enough free memory. Attempt to + // allocate a few more hashmaps to ensure the allocation will fail. + let mut empty_bytes2: HashMap = HashMap::new(); + let _ = empty_bytes2.try_reserve(MAX_USIZE / 16); + let mut empty_bytes3: HashMap = HashMap::new(); + let _ = empty_bytes3.try_reserve(MAX_USIZE / 16); + let mut empty_bytes4: HashMap = HashMap::new(); + if let Err(AllocError { .. 
}) = empty_bytes4.try_reserve(MAX_USIZE / 16) { + } else { + panic!("usize::MAX / 8 should trigger an OOM!"); + } + } + } + + #[test] + fn test_raw_entry() { + use super::RawEntryMut::{Occupied, Vacant}; + + let xs = [(1_i32, 10_i32), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)]; + + let mut map: HashMap<_, _> = xs.iter().copied().collect(); + + let compute_hash = |map: &HashMap, k: i32| -> u64 { + super::make_insert_hash::(map.hasher(), &k) + }; + + // Existing key (insert) + match map.raw_entry_mut().from_key(&1) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + assert_eq!(view.get(), &10); + assert_eq!(view.insert(100), 10); + } + } + let hash1 = compute_hash(&map, 1); + assert_eq!(map.raw_entry().from_key(&1).unwrap(), (&1, &100)); + assert_eq!( + map.raw_entry().from_hash(hash1, |k| *k == 1).unwrap(), + (&1, &100) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash1, &1).unwrap(), + (&1, &100) + ); + assert_eq!(map.len(), 6); + + // Existing key (update) + match map.raw_entry_mut().from_key(&2) { + Vacant(_) => unreachable!(), + Occupied(mut view) => { + let v = view.get_mut(); + let new_v = (*v) * 10; + *v = new_v; + } + } + let hash2 = compute_hash(&map, 2); + assert_eq!(map.raw_entry().from_key(&2).unwrap(), (&2, &200)); + assert_eq!( + map.raw_entry().from_hash(hash2, |k| *k == 2).unwrap(), + (&2, &200) + ); + assert_eq!( + map.raw_entry().from_key_hashed_nocheck(hash2, &2).unwrap(), + (&2, &200) + ); + assert_eq!(map.len(), 6); + + // Existing key (take) + let hash3 = compute_hash(&map, 3); + match map.raw_entry_mut().from_key_hashed_nocheck(hash3, &3) { + Vacant(_) => unreachable!(), + Occupied(view) => { + assert_eq!(view.remove_entry(), (3, 30)); + } + } + assert_eq!(map.raw_entry().from_key(&3), None); + assert_eq!(map.raw_entry().from_hash(hash3, |k| *k == 3), None); + assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash3, &3), None); + assert_eq!(map.len(), 5); + + // Nonexistent key (insert) + match map.raw_entry_mut().from_key(&10) { + Occupied(_) => unreachable!(), + Vacant(view) => { + assert_eq!(view.insert(10, 1000), (&mut 10, &mut 1000)); + } + } + assert_eq!(map.raw_entry().from_key(&10).unwrap(), (&10, &1000)); + assert_eq!(map.len(), 6); + + // Ensure all lookup methods produce equivalent results. + for k in 0..12 { + let hash = compute_hash(&map, k); + let v = map.get(&k).copied(); + let kv = v.as_ref().map(|v| (&k, v)); + + assert_eq!(map.raw_entry().from_key(&k), kv); + assert_eq!(map.raw_entry().from_hash(hash, |q| *q == k), kv); + assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); + + match map.raw_entry_mut().from_key(&k) { + Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + match map.raw_entry_mut().from_key_hashed_nocheck(hash, &k) { + Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + match map.raw_entry_mut().from_hash(hash, |q| *q == k) { + Occupied(o) => assert_eq!(Some(o.get_key_value()), kv), + Vacant(_) => assert_eq!(v, None), + } + } + } + + #[test] + fn test_key_without_hash_impl() { + #[derive(Debug)] + struct IntWrapper(u64); + + let mut m: HashMap = HashMap::default(); + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); + } + { + let vacant_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { + RawEntryMut::Occupied(..) 
=> panic!("Found entry for key 0"), + RawEntryMut::Vacant(e) => e, + }; + vacant_entry.insert_with_hasher(0, IntWrapper(0), (), |k| k.0); + } + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_none()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + { + let vacant_entry = match m.raw_entry_mut().from_hash(1, |k| k.0 == 1) { + RawEntryMut::Occupied(..) => panic!("Found entry for key 1"), + RawEntryMut::Vacant(e) => e, + }; + vacant_entry.insert_with_hasher(1, IntWrapper(1), (), |k| k.0); + } + { + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_some()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + { + let occupied_entry = match m.raw_entry_mut().from_hash(0, |k| k.0 == 0) { + RawEntryMut::Occupied(e) => e, + RawEntryMut::Vacant(..) => panic!("Couldn't find entry for key 0"), + }; + occupied_entry.remove(); + } + assert!(m.raw_entry().from_hash(0, |k| k.0 == 0).is_none()); + assert!(m.raw_entry().from_hash(1, |k| k.0 == 1).is_some()); + assert!(m.raw_entry().from_hash(2, |k| k.0 == 2).is_none()); + } + + #[test] + #[cfg(feature = "raw")] + fn test_into_iter_refresh() { + #[cfg(miri)] + const N: usize = 32; + #[cfg(not(miri))] + const N: usize = 128; + + let mut rng = rand::thread_rng(); + for n in 0..N { + let mut map = HashMap::new(); + for i in 0..n { + assert!(map.insert(i, 2 * i).is_none()); + } + let hash_builder = map.hasher().clone(); + + let mut it = unsafe { map.table.iter() }; + assert_eq!(it.len(), n); + + let mut i = 0; + let mut left = n; + let mut removed = Vec::new(); + loop { + // occasionally remove some elements + if i < n && rng.gen_bool(0.1) { + let hash_value = super::make_insert_hash(&hash_builder, &i); + + unsafe { + let e = map.table.find(hash_value, |q| q.0.eq(&i)); + if let Some(e) = e { + it.reflect_remove(&e); + let t = map.table.remove(e); + removed.push(t); + left -= 1; + } else { + assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed); + let e = map.table.insert( + hash_value, + (i, 2 * i), + super::make_hasher::(&hash_builder), + ); + it.reflect_insert(&e); + if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) { + removed.swap_remove(p); + } + left += 1; + } + } + } + + let e = it.next(); + if e.is_none() { + break; + } + assert!(i < n); + let t = unsafe { e.unwrap().as_ref() }; + assert!(!removed.contains(t)); + let (key, value) = t; + assert_eq!(*value, 2 * key); + i += 1; + } + assert!(i <= n); + + // just for safety: + assert_eq!(map.table.len(), left); + } + } + + #[test] + fn test_const_with_hasher() { + use core::hash::BuildHasher; + use std::collections::hash_map::DefaultHasher; + + #[derive(Clone)] + struct MyHasher; + impl BuildHasher for MyHasher { + type Hasher = DefaultHasher; + + fn build_hasher(&self) -> DefaultHasher { + DefaultHasher::new() + } + } + + const EMPTY_MAP: HashMap = + HashMap::with_hasher(MyHasher); + + let mut map = EMPTY_MAP; + map.insert(17, "seventeen".to_owned()); + assert_eq!("seventeen", map[&17]); + } + + #[test] + fn test_get_each_mut() { + let mut map = HashMap::new(); + map.insert("foo".to_owned(), 0); + map.insert("bar".to_owned(), 10); + map.insert("baz".to_owned(), 20); + map.insert("qux".to_owned(), 30); + + let xs = map.get_many_mut(["foo", "qux"]); + assert_eq!(xs, Some([&mut 0, &mut 30])); + + let xs = map.get_many_mut(["foo", "dud"]); + assert_eq!(xs, None); + + let xs = map.get_many_mut(["foo", "foo"]); + 
assert_eq!(xs, None); + + let ys = map.get_many_key_value_mut(["bar", "baz"]); + assert_eq!( + ys, + Some([(&"bar".to_owned(), &mut 10), (&"baz".to_owned(), &mut 20),]), + ); + + let ys = map.get_many_key_value_mut(["bar", "dip"]); + assert_eq!(ys, None); + + let ys = map.get_many_key_value_mut(["baz", "baz"]); + assert_eq!(ys, None); + } + + #[test] + #[should_panic = "panic in drop"] + fn test_clone_from_double_drop() { + #[derive(Clone)] + struct CheckedDrop { + panic_in_drop: bool, + dropped: bool, + } + impl Drop for CheckedDrop { + fn drop(&mut self) { + if self.panic_in_drop { + self.dropped = true; + panic!("panic in drop"); + } + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + const DISARMED: CheckedDrop = CheckedDrop { + panic_in_drop: false, + dropped: false, + }; + const ARMED: CheckedDrop = CheckedDrop { + panic_in_drop: true, + dropped: false, + }; + + let mut map1 = HashMap::new(); + map1.insert(1, DISARMED); + map1.insert(2, DISARMED); + map1.insert(3, DISARMED); + map1.insert(4, DISARMED); + + let mut map2 = HashMap::new(); + map2.insert(1, DISARMED); + map2.insert(2, ARMED); + map2.insert(3, DISARMED); + map2.insert(4, DISARMED); + + map2.clone_from(&map1); + } +} diff --git a/vendor/hashbrown-0.12.3/src/raw/alloc.rs b/vendor/hashbrown-0.12.3/src/raw/alloc.rs new file mode 100644 index 0000000..ba09ea9 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/raw/alloc.rs @@ -0,0 +1,73 @@ +pub(crate) use self::inner::{do_alloc, Allocator, Global}; + +#[cfg(feature = "nightly")] +mod inner { + use crate::alloc::alloc::Layout; + pub use crate::alloc::alloc::{Allocator, Global}; + use core::ptr::NonNull; + + #[allow(clippy::map_err_ignore)] + pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + match alloc.allocate(layout) { + Ok(ptr) => Ok(ptr.as_non_null_ptr()), + Err(_) => Err(()), + } + } + + #[cfg(feature = "bumpalo")] + unsafe impl Allocator for crate::BumpWrapper<'_> { + #[inline] + fn allocate(&self, layout: Layout) -> Result, core::alloc::AllocError> { + match self.0.try_alloc_layout(layout) { + Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), + Err(_) => Err(core::alloc::AllocError), + } + } + #[inline] + unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} + } +} + +#[cfg(not(feature = "nightly"))] +mod inner { + use crate::alloc::alloc::{alloc, dealloc, Layout}; + use core::ptr::NonNull; + + #[allow(clippy::missing_safety_doc)] // not exposed outside of this crate + pub unsafe trait Allocator { + fn allocate(&self, layout: Layout) -> Result, ()>; + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); + } + + #[derive(Copy, Clone)] + pub struct Global; + unsafe impl Allocator for Global { + #[inline] + fn allocate(&self, layout: Layout) -> Result, ()> { + unsafe { NonNull::new(alloc(layout)).ok_or(()) } + } + #[inline] + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + dealloc(ptr.as_ptr(), layout); + } + } + impl Default for Global { + #[inline] + fn default() -> Self { + Global + } + } + + pub fn do_alloc(alloc: &A, layout: Layout) -> Result, ()> { + alloc.allocate(layout) + } + + #[cfg(feature = "bumpalo")] + unsafe impl Allocator for crate::BumpWrapper<'_> { + #[allow(clippy::map_err_ignore)] + fn allocate(&self, layout: Layout) -> Result, ()> { + self.0.try_alloc_layout(layout).map_err(|_| ()) + } + unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} + } +} diff --git a/vendor/hashbrown-0.12.3/src/raw/bitmask.rs b/vendor/hashbrown-0.12.3/src/raw/bitmask.rs new file mode 
100644 index 0000000..7d4f9fc --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/raw/bitmask.rs @@ -0,0 +1,122 @@ +use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE}; +#[cfg(feature = "nightly")] +use core::intrinsics; + +/// A bit mask which contains the result of a `Match` operation on a `Group` and +/// allows iterating through them. +/// +/// The bit mask is arranged so that low-order bits represent lower memory +/// addresses for group match results. +/// +/// For implementation reasons, the bits in the set may be sparsely packed, so +/// that there is only one bit-per-byte used (the high bit, 7). If this is the +/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be +/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is +/// similarly a mask of all the actually-used bits. +#[derive(Copy, Clone)] +pub struct BitMask(pub BitMaskWord); + +#[allow(clippy::use_self)] +impl BitMask { + /// Returns a new `BitMask` with all bits inverted. + #[inline] + #[must_use] + pub fn invert(self) -> Self { + BitMask(self.0 ^ BITMASK_MASK) + } + + /// Flip the bit in the mask for the entry at the given index. + /// + /// Returns the bit's previous state. + #[inline] + #[allow(clippy::cast_ptr_alignment)] + #[cfg(feature = "raw")] + pub unsafe fn flip(&mut self, index: usize) -> bool { + // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit. + let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1); + self.0 ^= mask; + // The bit was set if the bit is now 0. + self.0 & mask == 0 + } + + /// Returns a new `BitMask` with the lowest bit removed. + #[inline] + #[must_use] + pub fn remove_lowest_bit(self) -> Self { + BitMask(self.0 & (self.0 - 1)) + } + /// Returns whether the `BitMask` has at least one set bit. + #[inline] + pub fn any_bit_set(self) -> bool { + self.0 != 0 + } + + /// Returns the first set bit in the `BitMask`, if there is one. + #[inline] + pub fn lowest_set_bit(self) -> Option { + if self.0 == 0 { + None + } else { + Some(unsafe { self.lowest_set_bit_nonzero() }) + } + } + + /// Returns the first set bit in the `BitMask`, if there is one. The + /// bitmask must not be empty. + #[inline] + #[cfg(feature = "nightly")] + pub unsafe fn lowest_set_bit_nonzero(self) -> usize { + intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE + } + #[inline] + #[cfg(not(feature = "nightly"))] + pub unsafe fn lowest_set_bit_nonzero(self) -> usize { + self.trailing_zeros() + } + + /// Returns the number of trailing zeroes in the `BitMask`. + #[inline] + pub fn trailing_zeros(self) -> usize { + // ARM doesn't have a trailing_zeroes instruction, and instead uses + // reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM + // versions (pre-ARMv7) don't have RBIT and need to emulate it + // instead. Since we only have 1 bit set in each byte on ARM, we can + // use swap_bytes (REV) + leading_zeroes instead. + if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 { + self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE + } else { + self.0.trailing_zeros() as usize / BITMASK_STRIDE + } + } + + /// Returns the number of leading zeroes in the `BitMask`. + #[inline] + pub fn leading_zeros(self) -> usize { + self.0.leading_zeros() as usize / BITMASK_STRIDE + } +} + +impl IntoIterator for BitMask { + type Item = usize; + type IntoIter = BitMaskIter; + + #[inline] + fn into_iter(self) -> BitMaskIter { + BitMaskIter(self) + } +} + +/// Iterator over the contents of a `BitMask`, returning the indices of set +/// bits. 
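+// [Editor's sketch, not part of upstream hashbrown; it would live at module
+// scope.] The stride handling documented above is subtle: in the generic
+// (non-SSE2) implementation each match is reported through the *high bit of a
+// byte*, so raw bit positions must be divided by BITMASK_STRIDE (8) to
+// recover bucket indices. A minimal self-contained illustration, assuming a
+// 64-bit group word:
+#[cfg(test)]
+mod stride_sketch {
+    #[test]
+    fn bit_position_to_bucket_index() {
+        const STRIDE: usize = 8;
+        // High bit set in byte 2 only: the match is in bucket 2 of the group.
+        let mask: u64 = 0x80u64 << (2 * 8);
+        assert_eq!(mask.trailing_zeros() as usize / STRIDE, 2);
+        // leading_zeros works the same way from the top of the group.
+        assert_eq!(mask.leading_zeros() as usize / STRIDE, 5);
+    }
+}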
+pub struct BitMaskIter(BitMask);
+
+impl Iterator for BitMaskIter {
+    type Item = usize;
+
+    #[inline]
+    fn next(&mut self) -> Option<usize> {
+        let bit = self.0.lowest_set_bit()?;
+        self.0 = self.0.remove_lowest_bit();
+        Some(bit)
+    }
+}
diff --git a/vendor/hashbrown-0.12.3/src/raw/generic.rs b/vendor/hashbrown-0.12.3/src/raw/generic.rs
new file mode 100644
index 0000000..b4d31e6
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/src/raw/generic.rs
@@ -0,0 +1,154 @@
+use super::bitmask::BitMask;
+use super::EMPTY;
+use core::{mem, ptr};
+
+// Use the native word size as the group size. Using a 64-bit group size on
+// a 32-bit architecture will just end up being more expensive because
+// shifts and multiplies will need to be emulated.
+#[cfg(any(
+    target_pointer_width = "64",
+    target_arch = "aarch64",
+    target_arch = "x86_64",
+    target_arch = "wasm32",
+))]
+type GroupWord = u64;
+#[cfg(all(
+    target_pointer_width = "32",
+    not(target_arch = "aarch64"),
+    not(target_arch = "x86_64"),
+    not(target_arch = "wasm32"),
+))]
+type GroupWord = u32;
+
+pub type BitMaskWord = GroupWord;
+pub const BITMASK_STRIDE: usize = 8;
+// We only care about the highest bit of each byte for the mask.
+#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
+pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+
+/// Helper function to replicate a byte across a `GroupWord`.
+#[inline]
+fn repeat(byte: u8) -> GroupWord {
+    GroupWord::from_ne_bytes([byte; Group::WIDTH])
+}
+
+/// Abstraction over a group of control bytes which can be scanned in
+/// parallel.
+///
+/// This implementation uses a word-sized integer.
+#[derive(Copy, Clone)]
+pub struct Group(GroupWord);
+
+// We perform all operations in the native endianness, and convert to
+// little-endian just before creating a BitMask. This can potentially
+// enable the compiler to eliminate unnecessary byte swaps if we are
+// only checking whether a BitMask is empty.
+#[allow(clippy::use_self)]
+impl Group {
+    /// Number of bytes in the group.
+    pub const WIDTH: usize = mem::size_of::<Self>();
+
+    /// Returns a full group of empty bytes, suitable for use as the initial
+    /// value for an empty hash table.
+    ///
+    /// This is guaranteed to be aligned to the group size.
+    #[inline]
+    pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+        #[repr(C)]
+        struct AlignedBytes {
+            _align: [Group; 0],
+            bytes: [u8; Group::WIDTH],
+        }
+        const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
+            _align: [],
+            bytes: [EMPTY; Group::WIDTH],
+        };
+        &ALIGNED_BYTES.bytes
+    }
+
+    /// Loads a group of bytes starting at the given address.
+    #[inline]
+    #[allow(clippy::cast_ptr_alignment)] // unaligned load
+    pub unsafe fn load(ptr: *const u8) -> Self {
+        Group(ptr::read_unaligned(ptr.cast()))
+    }
+
+    /// Loads a group of bytes starting at the given address, which must be
+    /// aligned to `mem::align_of::<Group>()`.
+    #[inline]
+    #[allow(clippy::cast_ptr_alignment)]
+    pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+        // FIXME: use align_offset once it stabilizes
+        debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+        Group(ptr::read(ptr.cast()))
+    }
+
+    /// Stores the group of bytes to the given address, which must be
+    /// aligned to `mem::align_of::<Group>()`.
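+    // [Editor's sketch, not part of upstream hashbrown; it would live at
+    // module scope.] `match_byte` further down this file uses the classic
+    // SWAR "find the zero byte" trick: XOR makes matching bytes zero, then
+    // (w - 0x01..01) & !w & 0x80..80 sets the high bit of exactly those zero
+    // bytes (modulo the false positives documented there). A standalone
+    // check of the bit trick, assuming a u64 word:
+    #[cfg(test)]
+    mod swar_sketch {
+        fn repeat64(b: u8) -> u64 {
+            u64::from_ne_bytes([b; 8])
+        }
+
+        #[test]
+        fn high_bits_mark_matching_bytes() {
+            let group = u64::from_ne_bytes([0x11, 0x22, 0x33, 0x22, 0x55, 0x66, 0x77, 0x88]);
+            let cmp = group ^ repeat64(0x22);
+            let mask = cmp.wrapping_sub(repeat64(0x01)) & !cmp & repeat64(0x80);
+            // Bytes 1 and 3 matched, so only their high bits are set.
+            assert_eq!(mask, u64::from_ne_bytes([0, 0x80, 0, 0x80, 0, 0, 0, 0]));
+        }
+    }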
+ #[inline] + #[allow(clippy::cast_ptr_alignment)] + pub unsafe fn store_aligned(self, ptr: *mut u8) { + // FIXME: use align_offset once it stabilizes + debug_assert_eq!(ptr as usize & (mem::align_of::() - 1), 0); + ptr::write(ptr.cast(), self.0); + } + + /// Returns a `BitMask` indicating all bytes in the group which *may* + /// have the given value. + /// + /// This function may return a false positive in certain cases where + /// the byte in the group differs from the searched value only in its + /// lowest bit. This is fine because: + /// - This never happens for `EMPTY` and `DELETED`, only full entries. + /// - The check for key equality will catch these. + /// - This only happens if there is at least 1 true match. + /// - The chance of this happening is very low (< 1% chance per byte). + #[inline] + pub fn match_byte(self, byte: u8) -> BitMask { + // This algorithm is derived from + // https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord + let cmp = self.0 ^ repeat(byte); + BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY`. + #[inline] + pub fn match_empty(self) -> BitMask { + // If the high bit is set, then the byte must be either: + // 1111_1111 (EMPTY) or 1000_0000 (DELETED). + // So we can just check if the top two bits are 1 by ANDing them. + BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are + /// `EMPTY` or `DELETED`. + #[inline] + pub fn match_empty_or_deleted(self) -> BitMask { + // A byte is EMPTY or DELETED iff the high bit is set + BitMask((self.0 & repeat(0x80)).to_le()) + } + + /// Returns a `BitMask` indicating all bytes in the group which are full. + #[inline] + pub fn match_full(self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let full = 1000_0000 (true) or 0000_0000 (false) + // !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry) + // !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry) + let full = !self.0 & repeat(0x80); + Group(!full + (full >> 7)) + } +} diff --git a/vendor/hashbrown-0.12.3/src/raw/mod.rs b/vendor/hashbrown-0.12.3/src/raw/mod.rs new file mode 100644 index 0000000..211b818 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/raw/mod.rs @@ -0,0 +1,2460 @@ +use crate::alloc::alloc::{handle_alloc_error, Layout}; +use crate::scopeguard::{guard, ScopeGuard}; +use crate::TryReserveError; +use core::iter::FusedIterator; +use core::marker::PhantomData; +use core::mem; +use core::mem::ManuallyDrop; +use core::mem::MaybeUninit; +use core::ptr::NonNull; +use core::{hint, ptr}; + +cfg_if! { + // Use the SSE2 implementation if possible: it allows us to scan 16 buckets + // at once instead of 8. We don't bother with AVX since it would require + // runtime dispatch and wouldn't gain us much anyways: the probability of + // finding a match drops off drastically after the first few buckets. 
+ // + // I attempted an implementation on ARM using NEON instructions, but it + // turns out that most NEON instructions have multi-cycle latency, which in + // the end outweighs any gains over the generic implementation. + if #[cfg(all( + target_feature = "sse2", + any(target_arch = "x86", target_arch = "x86_64"), + not(miri) + ))] { + mod sse2; + use sse2 as imp; + } else { + #[path = "generic.rs"] + mod generic; + use generic as imp; + } +} + +mod alloc; +pub(crate) use self::alloc::{do_alloc, Allocator, Global}; + +mod bitmask; + +use self::bitmask::{BitMask, BitMaskIter}; +use self::imp::Group; + +// Branch prediction hint. This is currently only available on nightly but it +// consistently improves performance by 10-15%. +#[cfg(feature = "nightly")] +use core::intrinsics::{likely, unlikely}; + +// On stable we can use #[cold] to get a equivalent effect: this attributes +// suggests that the function is unlikely to be called +#[cfg(not(feature = "nightly"))] +#[inline] +#[cold] +fn cold() {} + +#[cfg(not(feature = "nightly"))] +#[inline] +fn likely(b: bool) -> bool { + if !b { + cold(); + } + b +} +#[cfg(not(feature = "nightly"))] +#[inline] +fn unlikely(b: bool) -> bool { + if b { + cold(); + } + b +} + +#[inline] +unsafe fn offset_from(to: *const T, from: *const T) -> usize { + to.offset_from(from) as usize +} + +/// Whether memory allocation errors should return an error or abort. +#[derive(Copy, Clone)] +enum Fallibility { + Fallible, + Infallible, +} + +impl Fallibility { + /// Error to return on capacity overflow. + #[cfg_attr(feature = "inline-more", inline)] + fn capacity_overflow(self) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::CapacityOverflow, + Fallibility::Infallible => panic!("Hash table capacity overflow"), + } + } + + /// Error to return on allocation error. + #[cfg_attr(feature = "inline-more", inline)] + fn alloc_err(self, layout: Layout) -> TryReserveError { + match self { + Fallibility::Fallible => TryReserveError::AllocError { layout }, + Fallibility::Infallible => handle_alloc_error(layout), + } + } +} + +/// Control byte value for an empty bucket. +const EMPTY: u8 = 0b1111_1111; + +/// Control byte value for a deleted bucket. +const DELETED: u8 = 0b1000_0000; + +/// Checks whether a control byte represents a full bucket (top bit is clear). +#[inline] +fn is_full(ctrl: u8) -> bool { + ctrl & 0x80 == 0 +} + +/// Checks whether a control byte represents a special value (top bit is set). +#[inline] +fn is_special(ctrl: u8) -> bool { + ctrl & 0x80 != 0 +} + +/// Checks whether a special control value is EMPTY (just check 1 bit). +#[inline] +fn special_is_empty(ctrl: u8) -> bool { + debug_assert!(is_special(ctrl)); + ctrl & 0x01 != 0 +} + +/// Primary hash function, used to select the initial bucket to probe from. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h1(hash: u64) -> usize { + // On 32-bit platforms we simply ignore the higher hash bits. + hash as usize +} + +/// Secondary hash function, saved in the low 7 bits of the control byte. +#[inline] +#[allow(clippy::cast_possible_truncation)] +fn h2(hash: u64) -> u8 { + // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit + // value, some hash functions (such as FxHash) produce a usize result + // instead, which means that the top 32 bits are 0 on 32-bit platforms. 
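+// [Editor's sketch, not part of upstream hashbrown; it would live at module
+// scope.] The control-byte encoding above packs three states into one byte:
+// EMPTY = 0b1111_1111, DELETED = 0b1000_0000, and FULL = a 7-bit h2 fragment
+// with the top bit clear. That is why is_full() only tests the top bit and
+// special_is_empty() only tests bit 0:
+#[cfg(test)]
+mod ctrl_byte_sketch {
+    const EMPTY: u8 = 0b1111_1111;
+    const DELETED: u8 = 0b1000_0000;
+
+    #[test]
+    fn encoding_predicates() {
+        let full: u8 = 0x42; // some h2 value; top bit clear
+        assert!(full & 0x80 == 0); // is_full
+        assert!(EMPTY & 0x80 != 0 && DELETED & 0x80 != 0); // is_special
+        assert!(EMPTY & 0x01 != 0); // special_is_empty: EMPTY ends in 1...
+        assert!(DELETED & 0x01 == 0); // ...while DELETED ends in 0
+    }
+}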
+ let hash_len = usize::min(mem::size_of::(), mem::size_of::()); + let top7 = hash >> (hash_len * 8 - 7); + (top7 & 0x7f) as u8 // truncation +} + +/// Probe sequence based on triangular numbers, which is guaranteed (since our +/// table size is a power of two) to visit every group of elements exactly once. +/// +/// A triangular probe has us jump by 1 more group every time. So first we +/// jump by 1 group (meaning we just continue our linear scan), then 2 groups +/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on. +/// +/// Proof that the probe will visit every group in the table: +/// +struct ProbeSeq { + pos: usize, + stride: usize, +} + +impl ProbeSeq { + #[inline] + fn move_next(&mut self, bucket_mask: usize) { + // We should have found an empty bucket by now and ended the probe. + debug_assert!( + self.stride <= bucket_mask, + "Went past end of probe sequence" + ); + + self.stride += Group::WIDTH; + self.pos += self.stride; + self.pos &= bucket_mask; + } +} + +/// Returns the number of buckets needed to hold the given number of items, +/// taking the maximum load factor into account. +/// +/// Returns `None` if an overflow occurs. +// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258 +#[cfg_attr(target_os = "emscripten", inline(never))] +#[cfg_attr(not(target_os = "emscripten"), inline)] +fn capacity_to_buckets(cap: usize) -> Option { + debug_assert_ne!(cap, 0); + + // For small tables we require at least 1 empty bucket so that lookups are + // guaranteed to terminate if an element doesn't exist in the table. + if cap < 8 { + // We don't bother with a table size of 2 buckets since that can only + // hold a single element. Instead we skip directly to a 4 bucket table + // which can hold 3 elements. + return Some(if cap < 4 { 4 } else { 8 }); + } + + // Otherwise require 1/8 buckets to be empty (87.5% load) + // + // Be careful when modifying this, calculate_layout relies on the + // overflow check here. + let adjusted_cap = cap.checked_mul(8)? / 7; + + // Any overflows will have been caught by the checked_mul. Also, any + // rounding errors from the division above will be cleaned up by + // next_power_of_two (which can't overflow because of the previous division). + Some(adjusted_cap.next_power_of_two()) +} + +/// Returns the maximum effective capacity for the given bucket mask, taking +/// the maximum load factor into account. +#[inline] +fn bucket_mask_to_capacity(bucket_mask: usize) -> usize { + if bucket_mask < 8 { + // For tables with 1/2/4/8 buckets, we always reserve one empty slot. + // Keep in mind that the bucket mask is one less than the bucket count. + bucket_mask + } else { + // For larger tables we reserve 12.5% of the slots as empty. + ((bucket_mask + 1) / 8) * 7 + } +} + +/// Helper which allows the max calculation for ctrl_align to be statically computed for each T +/// while keeping the rest of `calculate_layout_for` independent of `T` +#[derive(Copy, Clone)] +struct TableLayout { + size: usize, + ctrl_align: usize, +} + +impl TableLayout { + #[inline] + fn new() -> Self { + let layout = Layout::new::(); + Self { + size: layout.size(), + ctrl_align: usize::max(layout.align(), Group::WIDTH), + } + } + + #[inline] + fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> { + debug_assert!(buckets.is_power_of_two()); + + let TableLayout { size, ctrl_align } = self; + // Manual layout calculation since Layout methods are not yet stable. 
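+// [Editor's sketch, not part of upstream hashbrown; it would live at module
+// scope.] The ProbeSeq docs above claim that a triangular probe (the stride
+// grows by one group per step) visits every group of a power-of-two table
+// exactly once. A quick check of that claim with a toy group width of 1:
+#[cfg(test)]
+mod probe_seq_sketch {
+    #[test]
+    fn triangular_probe_is_a_permutation() {
+        let n = 32usize; // power-of-two number of groups
+        let mut seen = vec![false; n];
+        let (mut pos, mut stride) = (0usize, 0usize);
+        for _ in 0..n {
+            assert!(!seen[pos], "group {} visited twice", pos);
+            seen[pos] = true;
+            stride += 1; // Group::WIDTH == 1 in this toy model
+            pos = (pos + stride) & (n - 1);
+        }
+        assert!(seen.iter().all(|&s| s));
+    }
+}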
+ let ctrl_offset = + size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1); + let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?; + + Some(( + unsafe { Layout::from_size_align_unchecked(len, ctrl_align) }, + ctrl_offset, + )) + } +} + +/// Returns a Layout which describes the allocation required for a hash table, +/// and the offset of the control bytes in the allocation. +/// (the offset is also one past last element of buckets) +/// +/// Returns `None` if an overflow occurs. +#[cfg_attr(feature = "inline-more", inline)] +fn calculate_layout(buckets: usize) -> Option<(Layout, usize)> { + TableLayout::new::().calculate_layout_for(buckets) +} + +/// A reference to a hash table bucket containing a `T`. +/// +/// This is usually just a pointer to the element itself. However if the element +/// is a ZST, then we instead track the index of the element in the table so +/// that `erase` works properly. +pub struct Bucket { + // Actually it is pointer to next element than element itself + // this is needed to maintain pointer arithmetic invariants + // keeping direct pointer to element introduces difficulty. + // Using `NonNull` for variance and niche layout + ptr: NonNull, +} + +// This Send impl is needed for rayon support. This is safe since Bucket is +// never exposed in a public API. +unsafe impl Send for Bucket {} + +impl Clone for Bucket { + #[inline] + fn clone(&self) -> Self { + Self { ptr: self.ptr } + } +} + +impl Bucket { + #[inline] + unsafe fn from_base_index(base: NonNull, index: usize) -> Self { + let ptr = if mem::size_of::() == 0 { + // won't overflow because index must be less than length + (index + 1) as *mut T + } else { + base.as_ptr().sub(index) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + #[inline] + unsafe fn to_base_index(&self, base: NonNull) -> usize { + if mem::size_of::() == 0 { + self.ptr.as_ptr() as usize - 1 + } else { + offset_from(base.as_ptr(), self.ptr.as_ptr()) + } + } + #[inline] + pub fn as_ptr(&self) -> *mut T { + if mem::size_of::() == 0 { + // Just return an arbitrary ZST pointer which is properly aligned + mem::align_of::() as *mut T + } else { + unsafe { self.ptr.as_ptr().sub(1) } + } + } + #[inline] + unsafe fn next_n(&self, offset: usize) -> Self { + let ptr = if mem::size_of::() == 0 { + (self.ptr.as_ptr() as usize + offset) as *mut T + } else { + self.ptr.as_ptr().sub(offset) + }; + Self { + ptr: NonNull::new_unchecked(ptr), + } + } + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn drop(&self) { + self.as_ptr().drop_in_place(); + } + #[inline] + pub unsafe fn read(&self) -> T { + self.as_ptr().read() + } + #[inline] + pub unsafe fn write(&self, val: T) { + self.as_ptr().write(val); + } + #[inline] + pub unsafe fn as_ref<'a>(&self) -> &'a T { + &*self.as_ptr() + } + #[inline] + pub unsafe fn as_mut<'a>(&self) -> &'a mut T { + &mut *self.as_ptr() + } + #[cfg(feature = "raw")] + #[inline] + pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) { + self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1); + } +} + +/// A raw hash table with an unsafe API. +pub struct RawTable { + table: RawTableInner, + // Tell dropck that we own instances of T. + marker: PhantomData, +} + +/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless +/// of how many different key-value types are used. +struct RawTableInner { + // Mask to get an index from a hash value. The value is one less than the + // number of buckets in the table. 
+ bucket_mask: usize, + + // [Padding], T1, T2, ..., Tlast, C1, C2, ... + // ^ points here + ctrl: NonNull, + + // Number of elements that can be inserted before we need to grow the table + growth_left: usize, + + // Number of elements in the table, only really used by len() + items: usize, + + alloc: A, +} + +impl RawTable { + /// Creates a new empty hash table without allocating any memory. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + pub const fn new() -> Self { + Self { + table: RawTableInner::new_in(Global), + marker: PhantomData, + } + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity(capacity: usize) -> Result { + Self::try_with_capacity_in(capacity, Global) + } + + /// Allocates a new hash table with at least enough capacity for inserting + /// the given number of elements without reallocating. + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_in(capacity, Global) + } +} + +impl RawTable { + /// Creates a new empty hash table without allocating any memory, using the + /// given allocator. + /// + /// In effect this returns a table with exactly 1 bucket. However we can + /// leave the data pointer dangling since that bucket is never written to + /// due to our load factor forcing us to always have at least 1 free bucket. + #[inline] + pub fn new_in(alloc: A) -> Self { + Self { + table: RawTableInner::new_in(alloc), + marker: PhantomData, + } + } + + /// Allocates a new hash table with the given number of buckets. + /// + /// The control bytes are left uninitialized. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + alloc: A, + buckets: usize, + fallibility: Fallibility, + ) -> Result { + debug_assert!(buckets.is_power_of_two()); + + Ok(Self { + table: RawTableInner::new_uninitialized( + alloc, + TableLayout::new::(), + buckets, + fallibility, + )?, + marker: PhantomData, + }) + } + + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating. + fn fallible_with_capacity( + alloc: A, + capacity: usize, + fallibility: Fallibility, + ) -> Result { + Ok(Self { + table: RawTableInner::fallible_with_capacity( + alloc, + TableLayout::new::(), + capacity, + fallibility, + )?, + marker: PhantomData, + }) + } + + /// Attempts to allocate a new hash table using the given allocator, with at least enough + /// capacity for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) + } + + /// Allocates a new hash table using the given allocator, with at least enough capacity for + /// inserting the given number of elements without reallocating. + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { + Ok(capacity) => capacity, + Err(_) => unsafe { hint::unreachable_unchecked() }, + } + } + + /// Returns a reference to the underlying allocator. 
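+    // [Editor's sketch, not part of upstream hashbrown; it would live at
+    // module scope.] Minimal in-crate use of the raw API defined in this
+    // file: the caller supplies the hash up front plus a hasher closure for
+    // rehashing. The identity "hash" below is for illustration only.
+    #[cfg(test)]
+    mod rawtable_sketch {
+        use super::RawTable;
+
+        #[test]
+        fn insert_then_find() {
+            let hasher = |t: &(u64, &'static str)| t.0; // toy hash: the key itself
+            let mut t: RawTable<(u64, &'static str)> = RawTable::new();
+            t.insert(1, (1, "one"), hasher);
+            t.insert(2, (2, "two"), hasher);
+            assert_eq!(t.len(), 2);
+            let v = t.find(2, |&(k, _)| k == 2).map(|b| unsafe { b.as_ref().1 });
+            assert_eq!(v, Some("two"));
+        }
+    }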
+ #[inline] + pub fn allocator(&self) -> &A { + &self.table.alloc + } + + /// Deallocates the table without dropping any entries. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn free_buckets(&mut self) { + self.table.free_buckets(TableLayout::new::()); + } + + /// Returns pointer to one past last element of data table. + #[inline] + pub unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) + } + + /// Returns pointer to start of data table. + #[inline] + #[cfg(feature = "nightly")] + pub unsafe fn data_start(&self) -> *mut T { + self.data_end().as_ptr().wrapping_sub(self.buckets()) + } + + /// Returns the index of a bucket from a `Bucket`. + #[inline] + pub unsafe fn bucket_index(&self, bucket: &Bucket) -> usize { + bucket.to_base_index(self.data_end()) + } + + /// Returns a pointer to an element in the table. + #[inline] + pub unsafe fn bucket(&self, index: usize) -> Bucket { + debug_assert_ne!(self.table.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + /// Erases an element from the table without dropping it. + #[cfg_attr(feature = "inline-more", inline)] + #[deprecated(since = "0.8.1", note = "use erase or remove instead")] + pub unsafe fn erase_no_drop(&mut self, item: &Bucket) { + let index = self.bucket_index(item); + self.table.erase(index); + } + + /// Erases an element from the table, dropping it in place. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + #[allow(deprecated)] + pub unsafe fn erase(&mut self, item: Bucket) { + // Erase the element from the table first since drop might panic. + self.erase_no_drop(&item); + item.drop(); + } + + /// Finds and erases an element from the table, dropping it in place. + /// Returns true if an element was found. + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool { + // Avoid `Option::map` because it bloats LLVM IR. + if let Some(bucket) = self.find(hash, eq) { + unsafe { + self.erase(bucket); + } + true + } else { + false + } + } + + /// Removes an element from the table, returning it. + #[cfg_attr(feature = "inline-more", inline)] + #[allow(clippy::needless_pass_by_value)] + #[allow(deprecated)] + pub unsafe fn remove(&mut self, item: Bucket) -> T { + self.erase_no_drop(&item); + item.read() + } + + /// Finds and removes an element from the table, returning it. + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option { + // Avoid `Option::map` because it bloats LLVM IR. + match self.find(hash, eq) { + Some(bucket) => Some(unsafe { self.remove(bucket) }), + None => None, + } + } + + /// Marks all table buckets as empty without dropping their contents. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear_no_drop(&mut self) { + self.table.clear_no_drop(); + } + + /// Removes all elements from the table without freeing the backing memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn clear(&mut self) { + // Ensure that the table is reset even if one of the drops panic + let mut self_ = guard(self, |self_| self_.clear_no_drop()); + unsafe { + self_.drop_elements(); + } + } + + unsafe fn drop_elements(&mut self) { + if mem::needs_drop::() && !self.is_empty() { + for item in self.iter() { + item.drop(); + } + } + } + + /// Shrinks the table to fit `max(self.len(), min_size)` elements. 
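+    // [Editor's sketch, not part of upstream hashbrown; it would live at
+    // module scope.] Erase/remove semantics, exercised through the safe
+    // remove_entry wrapper defined above:
+    #[cfg(test)]
+    mod remove_sketch {
+        use super::RawTable;
+
+        #[test]
+        fn remove_entry_returns_the_value() {
+            let hasher = |t: &(u32, u32)| u64::from(t.0);
+            let mut t: RawTable<(u32, u32)> = RawTable::new();
+            t.insert(1, (1, 10), hasher);
+            assert_eq!(t.remove_entry(1, |&(k, _)| k == 1), Some((1, 10)));
+            assert!(t.is_empty());
+        }
+    }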
+ #[cfg_attr(feature = "inline-more", inline)] + pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) { + // Calculate the minimal number of elements that we need to reserve + // space for. + let min_size = usize::max(self.table.items, min_size); + if min_size == 0 { + *self = Self::new_in(self.table.alloc.clone()); + return; + } + + // Calculate the number of buckets that we need for this number of + // elements. If the calculation overflows then the requested bucket + // count must be larger than what we have right and nothing needs to be + // done. + let min_buckets = match capacity_to_buckets(min_size) { + Some(buckets) => buckets, + None => return, + }; + + // If we have more buckets than we need, shrink the table. + if min_buckets < self.buckets() { + // Fast path if the table is empty + if self.table.items == 0 { + *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); + } else { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + if self + .resize(min_size, hasher, Fallibility::Infallible) + .is_err() + { + unsafe { hint::unreachable_unchecked() } + } + } + } + } + + /// Ensures that at least `additional` items can be inserted into the table + /// without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + if additional > self.table.growth_left { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + unsafe { hint::unreachable_unchecked() } + } + } + } + + /// Tries to ensure that at least `additional` items can be inserted into + /// the table without reallocation. + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_reserve( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + ) -> Result<(), TryReserveError> { + if additional > self.table.growth_left { + self.reserve_rehash(additional, hasher, Fallibility::Fallible) + } else { + Ok(()) + } + } + + /// Out-of-line slow path for `reserve` and `try_reserve`. + #[cold] + #[inline(never)] + fn reserve_rehash( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + fallibility: Fallibility, + ) -> Result<(), TryReserveError> { + unsafe { + self.table.reserve_rehash_inner( + additional, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + TableLayout::new::(), + if mem::needs_drop::() { + Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) + } else { + None + }, + ) + } + } + + /// Allocates a new table of a different size and moves the contents of the + /// current table into it. + fn resize( + &mut self, + capacity: usize, + hasher: impl Fn(&T) -> u64, + fallibility: Fallibility, + ) -> Result<(), TryReserveError> { + unsafe { + self.table.resize_inner( + capacity, + &|table, index| hasher(table.bucket::(index).as_ref()), + fallibility, + TableLayout::new::(), + ) + } + } + + /// Inserts a new element into the table, and returns its raw bucket. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket { + unsafe { + let mut index = self.table.find_insert_slot(hash); + + // We can avoid growing the table once we have reached our load + // factor if we are replacing a tombstone. This works since the + // number of EMPTY slots does not change in this case. 
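+    // [Editor's sketch, not part of upstream hashbrown; it would live at
+    // module scope.] reserve/try_reserve above guarantee room for
+    // `additional` further inserts; capacity() (defined below) reports
+    // items + growth_left:
+    #[cfg(test)]
+    mod reserve_sketch {
+        use super::RawTable;
+
+        #[test]
+        fn reserve_grows_capacity() {
+            let hasher = |v: &u32| u64::from(*v);
+            let mut t: RawTable<u32> = RawTable::new();
+            assert_eq!(t.capacity(), 0);
+            t.reserve(10, hasher);
+            assert!(t.capacity() >= 10);
+            assert!(t.try_reserve(10, hasher).is_ok());
+        }
+    }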
+ let old_ctrl = *self.table.ctrl(index); + if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) { + self.reserve(1, hasher); + index = self.table.find_insert_slot(hash); + } + + self.table.record_item_insert_at(index, old_ctrl, hash); + + let bucket = self.bucket(index); + bucket.write(value); + bucket + } + } + + /// Attempts to insert a new element without growing the table and return its raw bucket. + /// + /// Returns an `Err` containing the given element if inserting it would require growing the + /// table. + /// + /// This does not check if the given element already exists in the table. + #[cfg(feature = "raw")] + #[cfg_attr(feature = "inline-more", inline)] + pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result, T> { + unsafe { + match self.table.prepare_insert_no_grow(hash) { + Ok(index) => { + let bucket = self.bucket(index); + bucket.write(value); + Ok(bucket) + } + Err(()) => Err(value), + } + } + } + + /// Inserts a new element into the table, and returns a mutable reference to it. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T { + unsafe { self.insert(hash, value, hasher).as_mut() } + } + + /// Inserts a new element into the table, without growing the table. + /// + /// There must be enough space in the table to insert the new element. + /// + /// This does not check if the given element already exists in the table. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(any(feature = "raw", feature = "rustc-internal-api"))] + pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket { + let (index, old_ctrl) = self.table.prepare_insert_slot(hash); + let bucket = self.table.bucket(index); + + // If we are replacing a DELETED entry then we don't need to update + // the load counter. + self.table.growth_left -= special_is_empty(old_ctrl) as usize; + + bucket.write(value); + self.table.items += 1; + bucket + } + + /// Temporary removes a bucket, applying the given function to the removed + /// element and optionally put back the returned value in the same bucket. + /// + /// Returns `true` if the bucket still contains an element + /// + /// This does not check if the given bucket is actually occupied. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn replace_bucket_with(&mut self, bucket: Bucket, f: F) -> bool + where + F: FnOnce(T) -> Option, + { + let index = self.bucket_index(&bucket); + let old_ctrl = *self.table.ctrl(index); + debug_assert!(is_full(old_ctrl)); + let old_growth_left = self.table.growth_left; + let item = self.remove(bucket); + if let Some(new_item) = f(item) { + self.table.growth_left = old_growth_left; + self.table.set_ctrl(index, old_ctrl); + self.table.items += 1; + self.bucket(index).write(new_item); + true + } else { + false + } + } + + /// Searches for an element in the table. + #[inline] + pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option> { + let result = self.table.find_inner(hash, &mut |index| unsafe { + eq(self.bucket(index).as_ref()) + }); + + // Avoid `Option::map` because it bloats LLVM IR. + match result { + Some(index) => Some(unsafe { self.bucket(index) }), + None => None, + } + } + + /// Gets a reference to an element in the table. + #[inline] + pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + // Avoid `Option::map` because it bloats LLVM IR. 
+ match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_ref() }), + None => None, + } + } + + /// Gets a mutable reference to an element in the table. + #[inline] + pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + // Avoid `Option::map` because it bloats LLVM IR. + match self.find(hash, eq) { + Some(bucket) => Some(unsafe { bucket.as_mut() }), + None => None, + } + } + + /// Attempts to get mutable references to `N` entries in the table at once. + /// + /// Returns an array of length `N` with the results of each query. + /// + /// At most one mutable reference will be returned to any entry. `None` will be returned if any + /// of the hashes are duplicates. `None` will be returned if the hash is not found. + /// + /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to + /// the `i`th key to be looked up. + pub fn get_many_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[&'_ mut T; N]> { + unsafe { + let ptrs = self.get_many_mut_pointers(hashes, eq)?; + + for (i, &cur) in ptrs.iter().enumerate() { + if ptrs[..i].iter().any(|&prev| ptr::eq::(prev, cur)) { + return None; + } + } + // All bucket are distinct from all previous buckets so we're clear to return the result + // of the lookup. + + // TODO use `MaybeUninit::array_assume_init` here instead once that's stable. + Some(mem::transmute_copy(&ptrs)) + } + } + + pub unsafe fn get_many_unchecked_mut( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[&'_ mut T; N]> { + let ptrs = self.get_many_mut_pointers(hashes, eq)?; + Some(mem::transmute_copy(&ptrs)) + } + + unsafe fn get_many_mut_pointers( + &mut self, + hashes: [u64; N], + mut eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[*mut T; N]> { + // TODO use `MaybeUninit::uninit_array` here instead once that's stable. + let mut outs: MaybeUninit<[*mut T; N]> = MaybeUninit::uninit(); + let outs_ptr = outs.as_mut_ptr(); + + for (i, &hash) in hashes.iter().enumerate() { + let cur = self.find(hash, |k| eq(i, k))?; + *(*outs_ptr).get_unchecked_mut(i) = cur.as_mut(); + } + + // TODO use `MaybeUninit::array_assume_init` here instead once that's stable. + Some(outs.assume_init()) + } + + /// Returns the number of elements the map can hold without reallocating. + /// + /// This number is a lower bound; the table might be able to hold + /// more, but is guaranteed to be able to hold at least this many. + #[inline] + pub fn capacity(&self) -> usize { + self.table.items + self.table.growth_left + } + + /// Returns the number of elements in the table. + #[inline] + pub fn len(&self) -> usize { + self.table.items + } + + /// Returns `true` if the table contains no elements. + #[inline] + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Returns the number of buckets in the table. + #[inline] + pub fn buckets(&self) -> usize { + self.table.bucket_mask + 1 + } + + /// Returns an iterator over every element in the table. It is up to + /// the caller to ensure that the `RawTable` outlives the `RawIter`. + /// Because we cannot make the `next` method unsafe on the `RawIter` + /// struct, we have to make the `iter` method unsafe. 
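+    // [Editor's sketch, not part of upstream hashbrown; it would live at
+    // module scope, and it assumes RawDrain (defined later in this file)
+    // yields owned values.] drain(), just below, empties the table while
+    // keeping its allocation:
+    #[cfg(test)]
+    mod drain_sketch {
+        use super::RawTable;
+
+        #[test]
+        fn drain_empties_but_keeps_buckets() {
+            let hasher = |v: &u64| *v;
+            let mut t: RawTable<u64> = RawTable::with_capacity(4);
+            for v in 0..4u64 {
+                t.insert(v, v, hasher);
+            }
+            let buckets = t.buckets();
+            let mut drained: Vec<u64> = t.drain().collect();
+            drained.sort_unstable();
+            assert_eq!(drained, [0, 1, 2, 3]);
+            assert!(t.is_empty());
+            assert_eq!(t.buckets(), buckets); // memory is not freed
+        }
+    }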
+ #[inline] + pub unsafe fn iter(&self) -> RawIter { + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), + items: self.table.items, + } + } + + /// Returns an iterator over occupied buckets that could match a given hash. + /// + /// `RawTable` only stores 7 bits of the hash value, so this iterator may + /// return items that have a hash value different than the one provided. You + /// should always validate the returned values before using them. + /// + /// It is up to the caller to ensure that the `RawTable` outlives the + /// `RawIterHash`. Because we cannot make the `next` method unsafe on the + /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "raw")] + pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> { + RawIterHash::new(self, hash) + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + #[cfg_attr(feature = "inline-more", inline)] + pub fn drain(&mut self) -> RawDrain<'_, T, A> { + unsafe { + let iter = self.iter(); + self.drain_iter_from(iter) + } + } + + /// Returns an iterator which removes all elements from the table without + /// freeing the memory. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + #[cfg_attr(feature = "inline-more", inline)] + pub unsafe fn drain_iter_from(&mut self, iter: RawIter) -> RawDrain<'_, T, A> { + debug_assert_eq!(iter.len(), self.len()); + RawDrain { + iter, + table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), + orig_table: NonNull::from(self), + marker: PhantomData, + } + } + + /// Returns an iterator which consumes all elements from the table. + /// + /// Iteration starts at the provided iterator's current location. + /// + /// It is up to the caller to ensure that the iterator is valid for this + /// `RawTable` and covers all items that remain in the table. + pub unsafe fn into_iter_from(self, iter: RawIter) -> RawIntoIter { + debug_assert_eq!(iter.len(), self.len()); + + let alloc = self.table.alloc.clone(); + let allocation = self.into_allocation(); + RawIntoIter { + iter, + allocation, + marker: PhantomData, + alloc, + } + } + + /// Converts the table into a raw allocation. The contents of the table + /// should be dropped using a `RawIter` before freeing the allocation. + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn into_allocation(self) -> Option<(NonNull, Layout)> { + let alloc = if self.table.is_empty_singleton() { + None + } else { + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match calculate_layout::(self.table.buckets()) { + Some(lco) => lco, + None => unsafe { hint::unreachable_unchecked() }, + }; + Some(( + unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, + layout, + )) + }; + mem::forget(self); + alloc + } +} + +unsafe impl Send for RawTable +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawTable +where + T: Sync, + A: Sync, +{ +} + +impl RawTableInner { + #[inline] + const fn new_in(alloc: A) -> Self { + Self { + // Be careful to cast the entire slice to a raw pointer. 
+ ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, + bucket_mask: 0, + items: 0, + growth_left: 0, + alloc, + } + } +} + +impl RawTableInner { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new_uninitialized( + alloc: A, + table_layout: TableLayout, + buckets: usize, + fallibility: Fallibility, + ) -> Result { + debug_assert!(buckets.is_power_of_two()); + + // Avoid `Option::ok_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) { + Some(lco) => lco, + None => return Err(fallibility.capacity_overflow()), + }; + + // We need an additional check to ensure that the allocation doesn't + // exceed `isize::MAX`. We can skip this check on 64-bit systems since + // such allocations will never succeed anyways. + // + // This mirrors what Vec does in the standard library. + if mem::size_of::() < 8 && layout.size() > isize::MAX as usize { + return Err(fallibility.capacity_overflow()); + } + + let ptr: NonNull = match do_alloc(&alloc, layout) { + Ok(block) => block.cast(), + Err(_) => return Err(fallibility.alloc_err(layout)), + }; + + let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset)); + Ok(Self { + ctrl, + bucket_mask: buckets - 1, + items: 0, + growth_left: bucket_mask_to_capacity(buckets - 1), + alloc, + }) + } + + #[inline] + fn fallible_with_capacity( + alloc: A, + table_layout: TableLayout, + capacity: usize, + fallibility: Fallibility, + ) -> Result { + if capacity == 0 { + Ok(Self::new_in(alloc)) + } else { + unsafe { + let buckets = + capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?; + + let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?; + result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes()); + + Ok(result) + } + } + } + + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element and sets the hash for that slot. + /// + /// There must be at least 1 empty bucket in the table. + #[inline] + unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) { + let index = self.find_insert_slot(hash); + let old_ctrl = *self.ctrl(index); + self.set_ctrl_h2(index, hash); + (index, old_ctrl) + } + + /// Searches for an empty or deleted bucket which is suitable for inserting + /// a new element. + /// + /// There must be at least 1 empty bucket in the table. + #[inline] + fn find_insert_slot(&self, hash: u64) -> usize { + let mut probe_seq = self.probe_seq(hash); + loop { + unsafe { + let group = Group::load(self.ctrl(probe_seq.pos)); + if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() { + let result = (probe_seq.pos + bit) & self.bucket_mask; + + // In tables smaller than the group width, trailing control + // bytes outside the range of the table are filled with + // EMPTY entries. These will unfortunately trigger a + // match, but once masked may point to a full bucket that + // is already occupied. We detect this situation here and + // perform a second scan starting at the beginning of the + // table. This second scan is guaranteed to find an empty + // slot (due to the load factor) before hitting the trailing + // control bytes (containing EMPTY). 
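+ // (Illustration: with 4 buckets and Group::WIDTH == 16, the group
+ // loaded at position 0 covers ctrl bytes 0..16, i.e. the 4 real
+ // bytes plus 12 trailing EMPTY bytes. A match in that trailing
+ // region is masked back into 0..4 by `& self.bucket_mask` and may
+ // then point at a bucket that is already full; that is exactly the
+ // case handled below.)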
+ if unlikely(is_full(*self.ctrl(result))) { + debug_assert!(self.bucket_mask < Group::WIDTH); + debug_assert_ne!(probe_seq.pos, 0); + return Group::load_aligned(self.ctrl(0)) + .match_empty_or_deleted() + .lowest_set_bit_nonzero(); + } + + return result; + } + } + probe_seq.move_next(self.bucket_mask); + } + } + + /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of + /// code generated, but it is eliminated by LLVM optimizations. + #[inline] + fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option { + let h2_hash = h2(hash); + let mut probe_seq = self.probe_seq(hash); + + loop { + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; + + for bit in group.match_byte(h2_hash) { + let index = (probe_seq.pos + bit) & self.bucket_mask; + + if likely(eq(index)) { + return Some(index); + } + } + + if likely(group.match_empty().any_bit_set()) { + return None; + } + + probe_seq.move_next(self.bucket_mask); + } + } + + #[allow(clippy::mut_mut)] + #[inline] + unsafe fn prepare_rehash_in_place(&mut self) { + // Bulk convert all full control bytes to DELETED, and all DELETED + // control bytes to EMPTY. This effectively frees up all buckets + // containing a DELETED entry. + for i in (0..self.buckets()).step_by(Group::WIDTH) { + let group = Group::load_aligned(self.ctrl(i)); + let group = group.convert_special_to_empty_and_full_to_deleted(); + group.store_aligned(self.ctrl(i)); + } + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. + if self.buckets() < Group::WIDTH { + self.ctrl(0) + .copy_to(self.ctrl(Group::WIDTH), self.buckets()); + } else { + self.ctrl(0) + .copy_to(self.ctrl(self.buckets()), Group::WIDTH); + } + } + + #[inline] + unsafe fn bucket(&self, index: usize) -> Bucket { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + Bucket::from_base_index(self.data_end(), index) + } + + #[inline] + unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { + debug_assert_ne!(self.bucket_mask, 0); + debug_assert!(index < self.buckets()); + let base: *mut u8 = self.data_end().as_ptr(); + base.sub((index + 1) * size_of) + } + + #[inline] + unsafe fn data_end(&self) -> NonNull { + NonNull::new_unchecked(self.ctrl.as_ptr().cast()) + } + + /// Returns an iterator-like object for a probe sequence on the table. + /// + /// This iterator never terminates, but is guaranteed to visit each bucket + /// group exactly once. The loop using `probe_seq` must terminate upon + /// reaching a group containing an empty bucket. 
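The probe sequence documented above advances by triangular probing. The real `ProbeSeq` and its `move_next` are defined earlier in this file; the standalone sketch below assumes their shape from how they are used here, and shows why every group of a power-of-two table is visited exactly once:

```rust
use std::collections::HashSet;

const GROUP_WIDTH: usize = 16; // SSE2 group width, as in sse2.rs

struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    fn move_next(&mut self, bucket_mask: usize) {
        // Triangular probing: strides of 1, 2, 3, ... groups.
        self.stride += GROUP_WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}

fn main() {
    let bucket_mask = 64 - 1; // a 64-bucket table
    let mut seq = ProbeSeq { pos: 0, stride: 0 };
    let mut seen = HashSet::new();
    for _ in 0..(64 / GROUP_WIDTH) {
        assert!(seen.insert(seq.pos)); // never revisits a group start
        seq.move_next(bucket_mask);
    }
    assert_eq!(seen.len(), 4); // all four groups visited exactly once
}
```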
+    #[inline]
+    fn probe_seq(&self, hash: u64) -> ProbeSeq {
+        ProbeSeq {
+            pos: h1(hash) & self.bucket_mask,
+            stride: 0,
+        }
+    }
+
+    /// Returns the index of a bucket for which a value must be inserted if there is enough room
+    /// in the table; otherwise returns an error.
+    #[cfg(feature = "raw")]
+    #[inline]
+    unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
+        let index = self.find_insert_slot(hash);
+        let old_ctrl = *self.ctrl(index);
+        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
+            Err(())
+        } else {
+            self.record_item_insert_at(index, old_ctrl, hash);
+            Ok(index)
+        }
+    }
+
+    #[inline]
+    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
+        self.growth_left -= usize::from(special_is_empty(old_ctrl));
+        self.set_ctrl_h2(index, hash);
+        self.items += 1;
+    }
+
+    #[inline]
+    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
+        let probe_seq_pos = self.probe_seq(hash).pos;
+        let probe_index =
+            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
+        probe_index(i) == probe_index(new_i)
+    }
+
+    /// Sets a control byte to the hash, and possibly also the replicated control byte at
+    /// the end of the array.
+    #[inline]
+    unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
+        self.set_ctrl(index, h2(hash));
+    }
+
+    #[inline]
+    unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
+        let prev_ctrl = *self.ctrl(index);
+        self.set_ctrl_h2(index, hash);
+        prev_ctrl
+    }
+
+    /// Sets a control byte, and possibly also the replicated control byte at
+    /// the end of the array.
+    #[inline]
+    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
+        // Replicate the first Group::WIDTH control bytes at the end of
+        // the array without using a branch:
+        // - If index >= Group::WIDTH then index == index2.
+        // - Otherwise index2 == self.bucket_mask + 1 + index.
+        //
+        // The very last replicated control byte is never actually read because
+        // we mask the initial index for unaligned loads, but we write it
+        // anyways because it makes the set_ctrl implementation simpler.
+        //
+        // If there are fewer buckets than Group::WIDTH then this code will
+        // replicate the buckets at the end of the trailing group. For example
+        // with 2 buckets and a group size of 4, the control bytes will look
+        // like this:
+        //
+        //     Real    |             Replicated
+        // ---------------------------------------------
+        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
+        // ---------------------------------------------
+        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
+
+        *self.ctrl(index) = ctrl;
+        *self.ctrl(index2) = ctrl;
+    }
+
+    /// Returns a pointer to a control byte.
+    #[inline]
+    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
+        debug_assert!(index < self.num_ctrl_bytes());
+        self.ctrl.as_ptr().add(index)
+    }
+
+    #[inline]
+    fn buckets(&self) -> usize {
+        self.bucket_mask + 1
+    }
+
+    #[inline]
+    fn num_ctrl_bytes(&self) -> usize {
+        self.bucket_mask + 1 + Group::WIDTH
+    }
+
+    #[inline]
+    fn is_empty_singleton(&self) -> bool {
+        self.bucket_mask == 0
+    }
+
+    #[allow(clippy::mut_mut)]
+    #[inline]
+    unsafe fn prepare_resize(
+        &self,
+        table_layout: TableLayout,
+        capacity: usize,
+        fallibility: Fallibility,
+    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
+        debug_assert!(self.items <= capacity);
+
+        // Allocate and initialize the new table.
+        let mut new_table = RawTableInner::fallible_with_capacity(
+            self.alloc.clone(),
+            table_layout,
+            capacity,
+            fallibility,
+        )?;
+        new_table.growth_left -= self.items;
+        new_table.items = self.items;
+
+        // The hash function may panic, in which case we simply free the new
+        // table without dropping any elements that may have been copied into
+        // it.
+        //
+        // This guard is also used to free the old table on success, see
+        // the comment at the bottom of this function.
+        Ok(guard(new_table, move |self_| {
+            if !self_.is_empty_singleton() {
+                self_.free_buckets(table_layout);
+            }
+        }))
+    }
+
+    /// Reserves or rehashes to make room for `additional` more elements.
+    ///
+    /// This uses dynamic dispatch to reduce the amount of
+    /// code generated, but it is eliminated by LLVM optimizations when inlined.
+    #[allow(clippy::inline_always)]
+    #[inline(always)]
+    unsafe fn reserve_rehash_inner(
+        &mut self,
+        additional: usize,
+        hasher: &dyn Fn(&mut Self, usize) -> u64,
+        fallibility: Fallibility,
+        layout: TableLayout,
+        drop: Option<fn(*mut u8)>,
+    ) -> Result<(), TryReserveError> {
+        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
+        let new_items = match self.items.checked_add(additional) {
+            Some(new_items) => new_items,
+            None => return Err(fallibility.capacity_overflow()),
+        };
+        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
+        if new_items <= full_capacity / 2 {
+            // Rehash in-place without re-allocating if we have plenty of spare
+            // capacity that is locked up due to DELETED entries.
+            self.rehash_in_place(hasher, layout.size, drop);
+            Ok(())
+        } else {
+            // Otherwise, conservatively resize to at least the next size up
+            // to avoid churning deletes into frequent rehashes.
+            self.resize_inner(
+                usize::max(new_items, full_capacity + 1),
+                hasher,
+                fallibility,
+                layout,
+            )
+        }
+    }
+
+    /// Allocates a new table of a different size and moves the contents of the
+    /// current table into it.
+    ///
+    /// This uses dynamic dispatch to reduce the amount of
+    /// code generated, but it is eliminated by LLVM optimizations when inlined.
+    #[allow(clippy::inline_always)]
+    #[inline(always)]
+    unsafe fn resize_inner(
+        &mut self,
+        capacity: usize,
+        hasher: &dyn Fn(&mut Self, usize) -> u64,
+        fallibility: Fallibility,
+        layout: TableLayout,
+    ) -> Result<(), TryReserveError> {
+        let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
+
+        // Copy all elements to the new table.
+        for i in 0..self.buckets() {
+            if !is_full(*self.ctrl(i)) {
+                continue;
+            }
+
+            // This may panic.
+            let hash = hasher(self, i);
+
+            // We can use a simpler version of insert() here since:
+            // - there are no DELETED entries.
+            // - we know there is enough space in the table.
+            // - all elements are unique.
+            let (index, _) = new_table.prepare_insert_slot(hash);
+
+            ptr::copy_nonoverlapping(
+                self.bucket_ptr(i, layout.size),
+                new_table.bucket_ptr(index, layout.size),
+                layout.size,
+            );
+        }
+
+        // We successfully copied all elements without panicking. Now replace
+        // self with the new table. The old table will have its memory freed but
+        // the items will not be dropped (since they have been moved into the
+        // new table).
+        mem::swap(self, &mut new_table);
+
+        Ok(())
+    }
+
+    /// Rehashes the contents of the table in place (i.e. without changing the
+    /// allocation).
+    ///
+    /// If `hasher` panics then some of the table's contents may be lost.
+    ///
+    /// This uses dynamic dispatch to reduce the amount of
+    /// code generated, but it is eliminated by LLVM optimizations when inlined.
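`reserve_rehash_inner` above either rehashes in place (when at least half of the nominal capacity is tied up in DELETED entries) or grows the table. The capacity arithmetic it relies on keeps the load factor at 7/8; the sketch below mirrors the assumed shape of the internal `bucket_mask_to_capacity`/`capacity_to_buckets` helpers, which are defined elsewhere in this file:

```rust
// Hypothetical standalone mirrors of the internal helpers.
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // Small tables (up to 8 buckets) keep just one slot free.
        bucket_mask
    } else {
        // Larger tables use at most 7/8 of their buckets.
        ((bucket_mask + 1) / 8) * 7
    }
}

fn capacity_to_buckets(cap: usize) -> Option<usize> {
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }
    // Round up so that `cap` items stay within the 7/8 load factor.
    let adjusted = cap.checked_mul(8)? / 7;
    Some(adjusted.next_power_of_two())
}

fn main() {
    assert_eq!(bucket_mask_to_capacity(16 - 1), 14); // 16 buckets hold 14 items
    assert_eq!(capacity_to_buckets(14), Some(16));
    // reserve_rehash_inner rehashes in place only while
    // new_items <= full_capacity / 2, e.g. 7 items in a 16-bucket table.
    assert!(7 <= bucket_mask_to_capacity(15) / 2);
}
```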
+ #[allow(clippy::inline_always)] + #[cfg_attr(feature = "inline-more", inline(always))] + #[cfg_attr(not(feature = "inline-more"), inline)] + unsafe fn rehash_in_place( + &mut self, + hasher: &dyn Fn(&mut Self, usize) -> u64, + size_of: usize, + drop: Option, + ) { + // If the hash function panics then properly clean up any elements + // that we haven't rehashed yet. We unfortunately can't preserve the + // element since we lost their hash and have no way of recovering it + // without risking another panic. + self.prepare_rehash_in_place(); + + let mut guard = guard(self, move |self_| { + if let Some(drop) = drop { + for i in 0..self_.buckets() { + if *self_.ctrl(i) == DELETED { + self_.set_ctrl(i, EMPTY); + drop(self_.bucket_ptr(i, size_of)); + self_.items -= 1; + } + } + } + self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items; + }); + + // At this point, DELETED elements are elements that we haven't + // rehashed yet. Find them and re-insert them at their ideal + // position. + 'outer: for i in 0..guard.buckets() { + if *guard.ctrl(i) != DELETED { + continue; + } + + let i_p = guard.bucket_ptr(i, size_of); + + 'inner: loop { + // Hash the current item + let hash = hasher(*guard, i); + + // Search for a suitable place to put it + let new_i = guard.find_insert_slot(hash); + let new_i_p = guard.bucket_ptr(new_i, size_of); + + // Probing works by scanning through all of the control + // bytes in groups, which may not be aligned to the group + // size. If both the new and old position fall within the + // same unaligned group, then there is no benefit in moving + // it and we can just continue to the next item. + if likely(guard.is_in_same_group(i, new_i, hash)) { + guard.set_ctrl_h2(i, hash); + continue 'outer; + } + + // We are moving the current item to a new position. Write + // our H2 to the control byte of the new position. + let prev_ctrl = guard.replace_ctrl_h2(new_i, hash); + if prev_ctrl == EMPTY { + guard.set_ctrl(i, EMPTY); + // If the target slot is empty, simply move the current + // element into the new slot and clear the old control + // byte. + ptr::copy_nonoverlapping(i_p, new_i_p, size_of); + continue 'outer; + } else { + // If the target slot is occupied, swap the two elements + // and then continue processing the element that we just + // swapped into the old slot. + debug_assert_eq!(prev_ctrl, DELETED); + ptr::swap_nonoverlapping(i_p, new_i_p, size_of); + continue 'inner; + } + } + } + + guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items; + + mem::forget(guard); + } + + #[inline] + unsafe fn free_buckets(&mut self, table_layout: TableLayout) { + // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. + let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { + Some(lco) => lco, + None => hint::unreachable_unchecked(), + }; + self.alloc.deallocate( + NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)), + layout, + ); + } + + /// Marks all table buckets as empty without dropping their contents. 
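The rehash loop above recomputes each element's hash and re-derives its control byte via `set_ctrl_h2`. For orientation, here is a sketch of the `h1`/`h2` split used throughout this file; the real definitions live elsewhere in the file, and this mirrors their assumed shape:

```rust
// Hypothetical standalone mirrors of the h1/h2 helpers.
fn h1(hash: u64) -> usize {
    // On 64-bit targets the full hash takes part in bucket selection
    // (it is masked by bucket_mask at the call sites above).
    hash as usize
}

fn h2(hash: u64) -> u8 {
    // Top 7 bits of the hash. The high bit of a control byte is reserved
    // for EMPTY/DELETED, so a full bucket's byte is always < 0x80.
    const MIN_HASH_LEN: usize = if usize::BITS < 64 { 4 } else { 8 };
    ((hash >> (MIN_HASH_LEN * 8 - 7)) & 0x7f) as u8
}

fn main() {
    let hash = 0xdead_beef_dead_beef_u64;
    assert!(h2(hash) < 0x80); // always a valid "full" control byte
    let _start = h1(hash); // masked with bucket_mask by probe_seq
}
```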
+ #[inline] + fn clear_no_drop(&mut self) { + if !self.is_empty_singleton() { + unsafe { + self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes()); + } + } + self.items = 0; + self.growth_left = bucket_mask_to_capacity(self.bucket_mask); + } + + #[inline] + unsafe fn erase(&mut self, index: usize) { + debug_assert!(is_full(*self.ctrl(index))); + let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; + let empty_before = Group::load(self.ctrl(index_before)).match_empty(); + let empty_after = Group::load(self.ctrl(index)).match_empty(); + + // If we are inside a continuous block of Group::WIDTH full or deleted + // cells then a probe window may have seen a full block when trying to + // insert. We therefore need to keep that block non-empty so that + // lookups will continue searching to the next probe window. + // + // Note that in this context `leading_zeros` refers to the bytes at the + // end of a group, while `trailing_zeros` refers to the bytes at the + // beginning of a group. + let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { + DELETED + } else { + self.growth_left += 1; + EMPTY + }; + self.set_ctrl(index, ctrl); + self.items -= 1; + } +} + +impl Clone for RawTable { + fn clone(&self) -> Self { + if self.table.is_empty_singleton() { + Self::new_in(self.table.alloc.clone()) + } else { + unsafe { + // Avoid `Result::ok_or_else` because it bloats LLVM IR. + let new_table = match Self::new_uninitialized( + self.table.alloc.clone(), + self.table.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + + // If cloning fails then we need to free the allocation for the + // new table. However we don't run its drop since its control + // bytes are not initialized yet. + let mut guard = guard(ManuallyDrop::new(new_table), |new_table| { + new_table.free_buckets(); + }); + + guard.clone_from_spec(self); + + // Disarm the scope guard and return the newly created table. + ManuallyDrop::into_inner(ScopeGuard::into_inner(guard)) + } + } + } + + fn clone_from(&mut self, source: &Self) { + if source.table.is_empty_singleton() { + *self = Self::new_in(self.table.alloc.clone()); + } else { + unsafe { + // Make sure that if any panics occurs, we clear the table and + // leave it in an empty state. + let mut self_ = guard(self, |self_| { + self_.clear_no_drop(); + }); + + // First, drop all our elements without clearing the control + // bytes. If this panics then the scope guard will clear the + // table, leaking any elements that were not dropped yet. + // + // This leak is unavoidable: we can't try dropping more elements + // since this could lead to another panic and abort the process. + self_.drop_elements(); + + // If necessary, resize our table to match the source. + if self_.buckets() != source.buckets() { + // Skip our drop by using ptr::write. + if !self_.table.is_empty_singleton() { + self_.free_buckets(); + } + (&mut **self_ as *mut Self).write( + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::new_uninitialized( + self_.table.alloc.clone(), + source.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }, + ); + } + + self_.clone_from_spec(source); + + // Disarm the scope guard if cloning was successful. 
+ ScopeGuard::into_inner(self_); + } + } + } +} + +/// Specialization of `clone_from` for `Copy` types +trait RawTableClone { + unsafe fn clone_from_spec(&mut self, source: &Self); +} +impl RawTableClone for RawTable { + default_fn! { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self) { + self.clone_from_impl(source); + } + } +} +#[cfg(feature = "nightly")] +impl RawTableClone for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_spec(&mut self, source: &Self) { + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + source + .data_start() + .copy_to_nonoverlapping(self.data_start(), self.table.buckets()); + + self.table.items = source.table.items; + self.table.growth_left = source.table.growth_left; + } +} + +impl RawTable { + /// Common code for clone and clone_from. Assumes: + /// - `self.buckets() == source.buckets()`. + /// - Any existing elements have been dropped. + /// - The control bytes are not initialized yet. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn clone_from_impl(&mut self, source: &Self) { + // Copy the control bytes unchanged. We do this in a single pass + source + .table + .ctrl(0) + .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); + + // The cloning of elements may panic, in which case we need + // to make sure we drop only the elements that have been + // cloned so far. + let mut guard = guard((0, &mut *self), |(index, self_)| { + if mem::needs_drop::() && !self_.is_empty() { + for i in 0..=*index { + if is_full(*self_.table.ctrl(i)) { + self_.bucket(i).drop(); + } + } + } + }); + + for from in source.iter() { + let index = source.bucket_index(&from); + let to = guard.1.bucket(index); + to.write(from.as_ref().clone()); + + // Update the index in case we need to unwind. + guard.0 = index; + } + + // Successfully cloned all items, no need to clean up. + mem::forget(guard); + + self.table.items = source.table.items; + self.table.growth_left = source.table.growth_left; + } + + /// Variant of `clone_from` to use when a hasher is available. + #[cfg(feature = "raw")] + pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { + // If we have enough capacity in the table, just clear it and insert + // elements one by one. We don't do this if we have the same number of + // buckets as the source since we can just copy the contents directly + // in that case. + if self.table.buckets() != source.table.buckets() + && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len() + { + self.clear(); + + let guard_self = guard(&mut *self, |self_| { + // Clear the partially copied table if a panic occurs, otherwise + // items and growth_left will be out of sync with the contents + // of the table. + self_.clear(); + }); + + unsafe { + for item in source.iter() { + // This may panic. + let item = item.as_ref().clone(); + let hash = hasher(&item); + + // We can use a simpler version of insert() here since: + // - there are no DELETED entries. + // - we know there is enough space in the table. + // - all elements are unique. + let (index, _) = guard_self.table.prepare_insert_slot(hash); + guard_self.bucket(index).write(item); + } + } + + // Successfully cloned all items, no need to clean up. 
+ mem::forget(guard_self); + + self.table.items = source.table.items; + self.table.growth_left -= source.table.items; + } else { + self.clone_from(source); + } + } +} + +impl Default for RawTable { + #[inline] + fn default() -> Self { + Self::new_in(Default::default()) + } +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + if !self.table.is_empty_singleton() { + unsafe { + self.drop_elements(); + self.free_buckets(); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawTable { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + if !self.table.is_empty_singleton() { + unsafe { + self.drop_elements(); + self.free_buckets(); + } + } + } +} + +impl IntoIterator for RawTable { + type Item = T; + type IntoIter = RawIntoIter; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_iter(self) -> RawIntoIter { + unsafe { + let iter = self.iter(); + self.into_iter_from(iter) + } + } +} + +/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does +/// not track an item count. +pub(crate) struct RawIterRange { + // Mask of full buckets in the current group. Bits are cleared from this + // mask as each element is processed. + current_group: BitMask, + + // Pointer to the buckets for the current group. + data: Bucket, + + // Pointer to the next group of control bytes, + // Must be aligned to the group size. + next_ctrl: *const u8, + + // Pointer one past the last control byte of this range. + end: *const u8, +} + +impl RawIterRange { + /// Returns a `RawIterRange` covering a subset of a table. + /// + /// The control byte address must be aligned to the group size. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn new(ctrl: *const u8, data: Bucket, len: usize) -> Self { + debug_assert_ne!(len, 0); + debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); + let end = ctrl.add(len); + + // Load the first group and advance ctrl to point to the next group + let current_group = Group::load_aligned(ctrl).match_full(); + let next_ctrl = ctrl.add(Group::WIDTH); + + Self { + current_group, + data, + next_ctrl, + end, + } + } + + /// Splits a `RawIterRange` into two halves. + /// + /// Returns `None` if the remaining range is smaller than or equal to the + /// group width. + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "rayon")] + pub(crate) fn split(mut self) -> (Self, Option>) { + unsafe { + if self.end <= self.next_ctrl { + // Nothing to split if the group that we are current processing + // is the last one. + (self, None) + } else { + // len is the remaining number of elements after the group that + // we are currently processing. It must be a multiple of the + // group size (small tables are caught by the check above). + let len = offset_from(self.end, self.next_ctrl); + debug_assert_eq!(len % Group::WIDTH, 0); + + // Split the remaining elements into two halves, but round the + // midpoint down in case there is an odd number of groups + // remaining. This ensures that: + // - The tail is at least 1 group long. + // - The split is roughly even considering we still have the + // current group to process. 
+ let mid = (len / 2) & !(Group::WIDTH - 1); + + let tail = Self::new( + self.next_ctrl.add(mid), + self.data.next_n(Group::WIDTH).next_n(mid), + len - mid, + ); + debug_assert_eq!( + self.data.next_n(Group::WIDTH).next_n(mid).ptr, + tail.data.ptr + ); + debug_assert_eq!(self.end, tail.end); + self.end = self.next_ctrl.add(mid); + debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl); + (self, Some(tail)) + } + } + } + + /// # Safety + /// If DO_CHECK_PTR_RANGE is false, caller must ensure that we never try to iterate + /// after yielding all elements. + #[cfg_attr(feature = "inline-more", inline)] + unsafe fn next_impl(&mut self) -> Option> { + loop { + if let Some(index) = self.current_group.lowest_set_bit() { + self.current_group = self.current_group.remove_lowest_bit(); + return Some(self.data.next_n(index)); + } + + if DO_CHECK_PTR_RANGE && self.next_ctrl >= self.end { + return None; + } + + // We might read past self.end up to the next group boundary, + // but this is fine because it only occurs on tables smaller + // than the group size where the trailing control bytes are all + // EMPTY. On larger tables self.end is guaranteed to be aligned + // to the group size (since tables are power-of-two sized). + self.current_group = Group::load_aligned(self.next_ctrl).match_full(); + self.data = self.data.next_n(Group::WIDTH); + self.next_ctrl = self.next_ctrl.add(Group::WIDTH); + } + } +} + +// We make raw iterators unconditionally Send and Sync, and let the PhantomData +// in the actual iterator implementations determine the real Send/Sync bounds. +unsafe impl Send for RawIterRange {} +unsafe impl Sync for RawIterRange {} + +impl Clone for RawIterRange { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + data: self.data.clone(), + next_ctrl: self.next_ctrl, + current_group: self.current_group, + end: self.end, + } + } +} + +impl Iterator for RawIterRange { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + unsafe { + // SAFETY: We set checker flag to true. + self.next_impl::() + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // We don't have an item count, so just guess based on the range size. + let remaining_buckets = if self.end > self.next_ctrl { + unsafe { offset_from(self.end, self.next_ctrl) } + } else { + 0 + }; + + // Add a group width to include the group we are currently processing. + (0, Some(Group::WIDTH + remaining_buckets)) + } +} + +impl FusedIterator for RawIterRange {} + +/// Iterator which returns a raw pointer to every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding that bucket (unless `reflect_remove` is called). +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator (unless `reflect_insert` is called). +/// - The order in which the iterator yields bucket is unspecified and may +/// change in the future. +pub struct RawIter { + pub(crate) iter: RawIterRange, + items: usize, +} + +impl RawIter { + /// Refresh the iterator so that it reflects a removal from the given bucket. 
+ /// + /// For the iterator to remain valid, this method must be called once + /// for each removed bucket before `next` is called again. + /// + /// This method should be called _before_ the removal is made. It is not necessary to call this + /// method if you are removing an item that this iterator yielded in the past. + #[cfg(feature = "raw")] + pub fn reflect_remove(&mut self, b: &Bucket) { + self.reflect_toggle_full(b, false); + } + + /// Refresh the iterator so that it reflects an insertion into the given bucket. + /// + /// For the iterator to remain valid, this method must be called once + /// for each insert before `next` is called again. + /// + /// This method does not guarantee that an insertion of a bucket with a greater + /// index than the last one yielded will be reflected in the iterator. + /// + /// This method should be called _after_ the given insert is made. + #[cfg(feature = "raw")] + pub fn reflect_insert(&mut self, b: &Bucket) { + self.reflect_toggle_full(b, true); + } + + /// Refresh the iterator so that it reflects a change to the state of the given bucket. + #[cfg(feature = "raw")] + fn reflect_toggle_full(&mut self, b: &Bucket, is_insert: bool) { + unsafe { + if b.as_ptr() > self.iter.data.as_ptr() { + // The iterator has already passed the bucket's group. + // So the toggle isn't relevant to this iterator. + return; + } + + if self.iter.next_ctrl < self.iter.end + && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() + { + // The iterator has not yet reached the bucket's group. + // We don't need to reload anything, but we do need to adjust the item count. + + if cfg!(debug_assertions) { + // Double-check that the user isn't lying to us by checking the bucket state. + // To do that, we need to find its control byte. We know that self.iter.data is + // at self.iter.next_ctrl - Group::WIDTH, so we work from there: + let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); + // This method should be called _before_ a removal, or _after_ an insert, + // so in both cases the ctrl byte should indicate that the bucket is full. + assert!(is_full(*ctrl)); + } + + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } + + return; + } + + // The iterator is at the bucket group that the toggled bucket is in. + // We need to do two things: + // + // - Determine if the iterator already yielded the toggled bucket. + // If it did, we're done. + // - Otherwise, update the iterator cached group so that it won't + // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. + // We'll also need to update the item count accordingly. + if let Some(index) = self.iter.current_group.lowest_set_bit() { + let next_bucket = self.iter.data.next_n(index); + if b.as_ptr() > next_bucket.as_ptr() { + // The toggled bucket is "before" the bucket the iterator would yield next. We + // therefore don't need to do anything --- the iterator has already passed the + // bucket in question. + // + // The item count must already be correct, since a removal or insert "prior" to + // the iterator's position wouldn't affect the item count. + } else { + // The removed bucket is an upcoming bucket. We need to make sure it does _not_ + // get yielded, and also that it's no longer included in the item count. + // + // NOTE: We can't just reload the group here, both since that might reflect + // inserts we've already passed, and because that might inadvertently unset the + // bits for _other_ removals. 
If we do that, we'd have to also decrement the + // item count for those other bits that we unset. But the presumably subsequent + // call to reflect for those buckets might _also_ decrement the item count. + // Instead, we _just_ flip the bit for the particular bucket the caller asked + // us to reflect. + let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); + let was_full = self.iter.current_group.flip(our_bit); + debug_assert_ne!(was_full, is_insert); + + if is_insert { + self.items += 1; + } else { + self.items -= 1; + } + + if cfg!(debug_assertions) { + if b.as_ptr() == next_bucket.as_ptr() { + // The removed bucket should no longer be next + debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index)); + } else { + // We should not have changed what bucket comes next. + debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index)); + } + } + } + } else { + // We must have already iterated past the removed item. + } + } + } + + unsafe fn drop_elements(&mut self) { + if mem::needs_drop::() && self.len() != 0 { + for item in self { + item.drop(); + } + } + } +} + +impl Clone for RawIter { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + iter: self.iter.clone(), + items: self.items, + } + } +} + +impl Iterator for RawIter { + type Item = Bucket; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option> { + // Inner iterator iterates over buckets + // so it can do unnecessary work if we already yielded all items. + if self.items == 0 { + return None; + } + + let nxt = unsafe { + // SAFETY: We check number of items to yield using `items` field. + self.iter.next_impl::() + }; + + if nxt.is_some() { + self.items -= 1; + } + + nxt + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + (self.items, Some(self.items)) + } +} + +impl ExactSizeIterator for RawIter {} +impl FusedIterator for RawIter {} + +/// Iterator which consumes a table and returns elements. +pub struct RawIntoIter { + iter: RawIter, + allocation: Option<(NonNull, Layout)>, + marker: PhantomData, + alloc: A, +} + +impl RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawIntoIter +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawIntoIter +where + T: Sync, + A: Sync, +{ +} + +#[cfg(feature = "nightly")] +unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + self.iter.drop_elements(); + + // Free the table + if let Some((ptr, layout)) = self.allocation { + self.alloc.deallocate(ptr, layout); + } + } + } +} +#[cfg(not(feature = "nightly"))] +impl Drop for RawIntoIter { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements + self.iter.drop_elements(); + + // Free the table + if let Some((ptr, layout)) = self.allocation { + self.alloc.deallocate(ptr, layout); + } + } + } +} + +impl Iterator for RawIntoIter { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { Some(self.iter.next()?.read()) } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawIntoIter {} +impl FusedIterator for RawIntoIter {} + +/// Iterator which consumes elements without freeing the table storage. 
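`RawDrain` below is what the public `HashMap::drain` hands out: it consumes the entries but keeps the allocation. A usage sketch via the public wrapper:

```rust
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<i32, i32> = (0..8).map(|i| (i, i * i)).collect();

    // Drain consumes the entries but keeps the allocation around.
    let drained: Vec<(i32, i32)> = map.drain().collect();
    assert_eq!(drained.len(), 8);
    assert!(map.is_empty());
    assert!(map.capacity() >= 8); // storage was retained for reuse
}
```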
+pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { + iter: RawIter, + + // The table is moved into the iterator for the duration of the drain. This + // ensures that an empty table is left if the drain iterator is leaked + // without dropping. + table: ManuallyDrop>, + orig_table: NonNull>, + + // We don't use a &'a mut RawTable because we want RawDrain to be + // covariant over T. + marker: PhantomData<&'a RawTable>, +} + +impl RawDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub fn iter(&self) -> RawIter { + self.iter.clone() + } +} + +unsafe impl Send for RawDrain<'_, T, A> +where + T: Send, + A: Send, +{ +} +unsafe impl Sync for RawDrain<'_, T, A> +where + T: Sync, + A: Sync, +{ +} + +impl Drop for RawDrain<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + fn drop(&mut self) { + unsafe { + // Drop all remaining elements. Note that this may panic. + self.iter.drop_elements(); + + // Reset the contents of the table now that all elements have been + // dropped. + self.table.clear_no_drop(); + + // Move the now empty table back to its original location. + self.orig_table + .as_ptr() + .copy_from_nonoverlapping(&*self.table, 1); + } + } +} + +impl Iterator for RawDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn next(&mut self) -> Option { + unsafe { + let item = self.iter.next()?; + Some(item.read()) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } +} + +impl ExactSizeIterator for RawDrain<'_, T, A> {} +impl FusedIterator for RawDrain<'_, T, A> {} + +/// Iterator over occupied buckets that could match a given hash. +/// +/// `RawTable` only stores 7 bits of the hash value, so this iterator may return +/// items that have a hash value different than the one provided. You should +/// always validate the returned values before using them. +pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> { + inner: RawIterHashInner<'a, A>, + _marker: PhantomData, +} + +struct RawIterHashInner<'a, A: Allocator + Clone> { + table: &'a RawTableInner, + + // The top 7 bits of the hash. + h2_hash: u8, + + // The sequence of groups to probe in the search. + probe_seq: ProbeSeq, + + group: Group, + + // The elements within the group with a matching h2-hash. 
+ bitmask: BitMaskIter, +} + +impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "raw")] + fn new(table: &'a RawTable, hash: u64) -> Self { + RawIterHash { + inner: RawIterHashInner::new(&table.table, hash), + _marker: PhantomData, + } + } +} +impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> { + #[cfg_attr(feature = "inline-more", inline)] + #[cfg(feature = "raw")] + fn new(table: &'a RawTableInner, hash: u64) -> Self { + unsafe { + let h2_hash = h2(hash); + let probe_seq = table.probe_seq(hash); + let group = Group::load(table.ctrl(probe_seq.pos)); + let bitmask = group.match_byte(h2_hash).into_iter(); + + RawIterHashInner { + table, + h2_hash, + probe_seq, + group, + bitmask, + } + } + } +} + +impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> { + type Item = Bucket; + + fn next(&mut self) -> Option> { + unsafe { + match self.inner.next() { + Some(index) => Some(self.inner.table.bucket(index)), + None => None, + } + } + } +} + +impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> { + type Item = usize; + + fn next(&mut self) -> Option { + unsafe { + loop { + if let Some(bit) = self.bitmask.next() { + let index = (self.probe_seq.pos + bit) & self.table.bucket_mask; + return Some(index); + } + if likely(self.group.match_empty().any_bit_set()) { + return None; + } + self.probe_seq.move_next(self.table.bucket_mask); + self.group = Group::load(self.table.ctrl(self.probe_seq.pos)); + self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); + } + } + } +} + +#[cfg(test)] +mod test_map { + use super::*; + + fn rehash_in_place(table: &mut RawTable, hasher: impl Fn(&T) -> u64) { + unsafe { + table.table.rehash_in_place( + &|table, index| hasher(table.bucket::(index).as_ref()), + mem::size_of::(), + if mem::needs_drop::() { + Some(mem::transmute(ptr::drop_in_place:: as unsafe fn(*mut T))) + } else { + None + }, + ); + } + } + + #[test] + fn rehash() { + let mut table = RawTable::new(); + let hasher = |i: &u64| *i; + for i in 0..100 { + table.insert(i, i, hasher); + } + + for i in 0..100 { + unsafe { + assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); + } + assert!(table.find(i + 100, |x| *x == i + 100).is_none()); + } + + rehash_in_place(&mut table, hasher); + + for i in 0..100 { + unsafe { + assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); + } + assert!(table.find(i + 100, |x| *x == i + 100).is_none()); + } + } +} diff --git a/vendor/hashbrown-0.12.3/src/raw/sse2.rs b/vendor/hashbrown-0.12.3/src/raw/sse2.rs new file mode 100644 index 0000000..a0bf6da --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/raw/sse2.rs @@ -0,0 +1,146 @@ +use super::bitmask::BitMask; +use super::EMPTY; +use core::mem; + +#[cfg(target_arch = "x86")] +use core::arch::x86; +#[cfg(target_arch = "x86_64")] +use core::arch::x86_64 as x86; + +pub type BitMaskWord = u16; +pub const BITMASK_STRIDE: usize = 1; +pub const BITMASK_MASK: BitMaskWord = 0xffff; + +/// Abstraction over a group of control bytes which can be scanned in +/// parallel. +/// +/// This implementation uses a 128-bit SSE value. +#[derive(Copy, Clone)] +pub struct Group(x86::__m128i); + +// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859 +#[allow(clippy::use_self)] +impl Group { + /// Number of bytes in the group. + pub const WIDTH: usize = mem::size_of::(); + + /// Returns a full group of empty bytes, suitable for use as the initial + /// value for an empty hash table. 
+    ///
+    /// This is guaranteed to be aligned to the group size.
+    #[inline]
+    #[allow(clippy::items_after_statements)]
+    pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+        #[repr(C)]
+        struct AlignedBytes {
+            _align: [Group; 0],
+            bytes: [u8; Group::WIDTH],
+        }
+        const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
+            _align: [],
+            bytes: [EMPTY; Group::WIDTH],
+        };
+        &ALIGNED_BYTES.bytes
+    }
+
+    /// Loads a group of bytes starting at the given address.
+    #[inline]
+    #[allow(clippy::cast_ptr_alignment)] // unaligned load
+    pub unsafe fn load(ptr: *const u8) -> Self {
+        Group(x86::_mm_loadu_si128(ptr.cast()))
+    }
+
+    /// Loads a group of bytes starting at the given address, which must be
+    /// aligned to `mem::align_of::<Group>()`.
+    #[inline]
+    #[allow(clippy::cast_ptr_alignment)]
+    pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+        // FIXME: use align_offset once it stabilizes
+        debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
+        Group(x86::_mm_load_si128(ptr.cast()))
+    }
+
+    /// Stores the group of bytes to the given address, which must be
+    /// aligned to `mem::align_of::<Group>()`.
+    #[inline]
+    #[allow(clippy::cast_ptr_alignment)]
+    pub unsafe fn store_aligned(self, ptr: *mut u8) {
+        // FIXME: use align_offset once it stabilizes
+        debug_assert_eq!(ptr as usize & (mem::align_of::<Group>() - 1), 0);
+        x86::_mm_store_si128(ptr.cast(), self.0);
+    }
+
+    /// Returns a `BitMask` indicating all bytes in the group which have
+    /// the given value.
+    #[inline]
+    pub fn match_byte(self, byte: u8) -> BitMask {
+        #[allow(
+            clippy::cast_possible_wrap, // byte: u8 as i8
+            // byte: i32 as u16
+            //   note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
+            //   upper 16-bits of the i32 are zeroed:
+            clippy::cast_sign_loss,
+            clippy::cast_possible_truncation
+        )]
+        unsafe {
+            let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8));
+            BitMask(x86::_mm_movemask_epi8(cmp) as u16)
+        }
+    }
+
+    /// Returns a `BitMask` indicating all bytes in the group which are
+    /// `EMPTY`.
+    #[inline]
+    pub fn match_empty(self) -> BitMask {
+        self.match_byte(EMPTY)
+    }
+
+    /// Returns a `BitMask` indicating all bytes in the group which are
+    /// `EMPTY` or `DELETED`.
+    #[inline]
+    pub fn match_empty_or_deleted(self) -> BitMask {
+        #[allow(
+            // byte: i32 as u16
+            //   note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
+            //   upper 16-bits of the i32 are zeroed:
+            clippy::cast_sign_loss,
+            clippy::cast_possible_truncation
+        )]
+        unsafe {
+            // A byte is EMPTY or DELETED iff the high bit is set
+            BitMask(x86::_mm_movemask_epi8(self.0) as u16)
+        }
+    }
+
+    /// Returns a `BitMask` indicating all bytes in the group which are full.
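For reference, the effect of `match_byte` above, one mask bit per control byte, can be written as a portable scalar loop; the SSE2 path computes the same mask in a single `_mm_cmpeq_epi8` + `_mm_movemask_epi8` pair:

```rust
// One bit per byte, set where the control byte equals `byte`.
fn match_byte_scalar(group: [u8; 16], byte: u8) -> u16 {
    let mut mask = 0u16;
    for (i, &b) in group.iter().enumerate() {
        if b == byte {
            mask |= 1 << i;
        }
    }
    mask
}

fn main() {
    let mut group = [0xff_u8; 16]; // all EMPTY
    group[3] = 0x27; // two full buckets whose h2 is 0x27
    group[9] = 0x27;
    assert_eq!(match_byte_scalar(group, 0x27), (1 << 3) | (1 << 9));
}
```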
+ #[inline] + pub fn match_full(&self) -> BitMask { + self.match_empty_or_deleted().invert() + } + + /// Performs the following transformation on all bytes in the group: + /// - `EMPTY => EMPTY` + /// - `DELETED => EMPTY` + /// - `FULL => DELETED` + #[inline] + pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self { + // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111 + // and high_bit = 0 (FULL) to 1000_0000 + // + // Here's this logic expanded to concrete values: + // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false) + // 1111_1111 | 1000_0000 = 1111_1111 + // 0000_0000 | 1000_0000 = 1000_0000 + #[allow( + clippy::cast_possible_wrap, // byte: 0x80_u8 as i8 + )] + unsafe { + let zero = x86::_mm_setzero_si128(); + let special = x86::_mm_cmpgt_epi8(zero, self.0); + Group(x86::_mm_or_si128( + special, + x86::_mm_set1_epi8(0x80_u8 as i8), + )) + } + } +} diff --git a/vendor/hashbrown-0.12.3/src/rustc_entry.rs b/vendor/hashbrown-0.12.3/src/rustc_entry.rs new file mode 100644 index 0000000..2e84595 --- /dev/null +++ b/vendor/hashbrown-0.12.3/src/rustc_entry.rs @@ -0,0 +1,630 @@ +use self::RustcEntry::*; +use crate::map::{make_insert_hash, Drain, HashMap, IntoIter, Iter, IterMut}; +use crate::raw::{Allocator, Bucket, Global, RawTable}; +use core::fmt::{self, Debug}; +use core::hash::{BuildHasher, Hash}; +use core::mem; + +impl HashMap +where + K: Eq + Hash, + S: BuildHasher, + A: Allocator + Clone, +{ + /// Gets the given key's corresponding entry in the map for in-place manipulation. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut letters = HashMap::new(); + /// + /// for ch in "a short treatise on fungi".chars() { + /// let counter = letters.rustc_entry(ch).or_insert(0); + /// *counter += 1; + /// } + /// + /// assert_eq!(letters[&'s'], 2); + /// assert_eq!(letters[&'t'], 3); + /// assert_eq!(letters[&'u'], 1); + /// assert_eq!(letters.get(&'y'), None); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> { + let hash = make_insert_hash(&self.hash_builder, &key); + if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) { + RustcEntry::Occupied(RustcOccupiedEntry { + key: Some(key), + elem, + table: &mut self.table, + }) + } else { + // Ideally we would put this in VacantEntry::insert, but Entry is not + // generic over the BuildHasher and adding a generic parameter would be + // a breaking change. + self.reserve(1); + + RustcEntry::Vacant(RustcVacantEntry { + hash, + key, + table: &mut self.table, + }) + } + } +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This `enum` is constructed from the [`rustc_entry`] method on [`HashMap`]. +/// +/// [`HashMap`]: struct.HashMap.html +/// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry +pub enum RustcEntry<'a, K, V, A = Global> +where + A: Allocator + Clone, +{ + /// An occupied entry. + Occupied(RustcOccupiedEntry<'a, K, V, A>), + + /// A vacant entry. + Vacant(RustcVacantEntry<'a, K, V, A>), +} + +impl Debug for RustcEntry<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +/// A view into an occupied entry in a `HashMap`. +/// It is part of the [`RustcEntry`] enum. 
+/// +/// [`RustcEntry`]: enum.RustcEntry.html +pub struct RustcOccupiedEntry<'a, K, V, A = Global> +where + A: Allocator + Clone, +{ + key: Option, + elem: Bucket<(K, V)>, + table: &'a mut RawTable<(K, V), A>, +} + +unsafe impl Send for RustcOccupiedEntry<'_, K, V, A> +where + K: Send, + V: Send, + A: Allocator + Clone + Send, +{ +} +unsafe impl Sync for RustcOccupiedEntry<'_, K, V, A> +where + K: Sync, + V: Sync, + A: Allocator + Clone + Sync, +{ +} + +impl Debug for RustcOccupiedEntry<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("key", self.key()) + .field("value", self.get()) + .finish() + } +} + +/// A view into a vacant entry in a `HashMap`. +/// It is part of the [`RustcEntry`] enum. +/// +/// [`RustcEntry`]: enum.RustcEntry.html +pub struct RustcVacantEntry<'a, K, V, A = Global> +where + A: Allocator + Clone, +{ + hash: u64, + key: K, + table: &'a mut RawTable<(K, V), A>, +} + +impl Debug for RustcVacantEntry<'_, K, V, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_tuple("VacantEntry").field(self.key()).finish() + } +} + +impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { + /// Sets the value of the entry, and returns a RustcOccupiedEntry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// let entry = map.rustc_entry("horseyland").insert(37); + /// + /// assert_eq!(entry.key(), &"horseyland"); + /// ``` + pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> { + match self { + Vacant(entry) => entry.insert_entry(value), + Occupied(mut entry) => { + entry.insert(value); + entry + } + } + } + + /// Ensures a value is in the entry by inserting the default if empty, and returns + /// a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.rustc_entry("poneyland").or_insert(3); + /// assert_eq!(map["poneyland"], 3); + /// + /// *map.rustc_entry("poneyland").or_insert(10) *= 2; + /// assert_eq!(map["poneyland"], 6); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert(self, default: V) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, String> = HashMap::new(); + /// let s = "hoho".to_string(); + /// + /// map.rustc_entry("poneyland").or_insert_with(|| s); + /// + /// assert_eq!(map["poneyland"], "hoho".to_string()); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_insert_with V>(self, default: F) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(default()), + } + } + + /// Returns a reference to this entry's key. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + match *self { + Occupied(ref entry) => entry.key(), + Vacant(ref entry) => entry.key(), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// + /// map.rustc_entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 42); + /// + /// map.rustc_entry("poneyland") + /// .and_modify(|e| { *e += 1 }) + /// .or_insert(42); + /// assert_eq!(map["poneyland"], 43); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn and_modify(self, f: F) -> Self + where + F: FnOnce(&mut V), + { + match self { + Occupied(mut entry) => { + f(entry.get_mut()); + Occupied(entry) + } + Vacant(entry) => Vacant(entry), + } + } +} + +impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { + /// Ensures a value is in the entry by inserting the default value if empty, + /// and returns a mutable reference to the value in the entry. + /// + /// # Examples + /// + /// ``` + /// # fn main() { + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, Option> = HashMap::new(); + /// map.rustc_entry("poneyland").or_default(); + /// + /// assert_eq!(map["poneyland"], None); + /// # } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn or_default(self) -> &'a mut V + where + K: Hash, + { + match self { + Occupied(entry) => entry.into_mut(), + Vacant(entry) => entry.insert(Default::default()), + } + } +} + +impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { + /// Gets a reference to the key in the entry. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland"); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn key(&self) -> &K { + unsafe { &self.elem.as_ref().0 } + } + + /// Take the ownership of the key and value from the map. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// // We delete the entry from the map. + /// o.remove_entry(); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove_entry(self) -> (K, V) { + unsafe { self.table.remove(self.elem) } + } + + /// Gets a reference to the value in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.get(), &12); + /// } + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get(&self) -> &V { + unsafe { &self.elem.as_ref().1 } + } + + /// Gets a mutable reference to the value in the entry. + /// + /// If you need a reference to the `RustcOccupiedEntry` which may outlive the + /// destruction of the `RustcEntry` value, see [`into_mut`]. + /// + /// [`into_mut`]: #method.into_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { + /// *o.get_mut() += 10; + /// assert_eq!(*o.get(), 22); + /// + /// // We can use the same RustcEntry multiple times. + /// *o.get_mut() += 2; + /// } + /// + /// assert_eq!(map["poneyland"], 24); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry + /// with a lifetime bound to the map itself. + /// + /// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`]. + /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// assert_eq!(map["poneyland"], 12); + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// *o.into_mut() += 10; + /// } + /// + /// assert_eq!(map["poneyland"], 22); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn into_mut(self) -> &'a mut V { + unsafe { &mut self.elem.as_mut().1 } + } + + /// Sets the value of the entry, and returns the entry's old value. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.insert(15), 12); + /// } + /// + /// assert_eq!(map["poneyland"], 15); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + /// Takes the value out of the entry, and returns it. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashMap; + /// use hashbrown::hash_map::RustcEntry; + /// + /// let mut map: HashMap<&str, u32> = HashMap::new(); + /// map.rustc_entry("poneyland").or_insert(12); + /// + /// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") { + /// assert_eq!(o.remove(), 12); + /// } + /// + /// assert_eq!(map.contains_key("poneyland"), false); + /// ``` + #[cfg_attr(feature = "inline-more", inline)] + pub fn remove(self) -> V { + self.remove_entry().1 + } + + /// Replaces the entry, returning the old key and value. The new key in the hash map will be + /// the key used to create this entry. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{RustcEntry, HashMap};
+    /// use std::rc::Rc;
+    ///
+    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
+    /// map.insert(Rc::new("Stringthing".to_string()), 15);
+    ///
+    /// let my_key = Rc::new("Stringthing".to_string());
+    ///
+    /// if let RustcEntry::Occupied(entry) = map.rustc_entry(my_key) {
+    ///     // Also replace the key with a handle to our other key.
+    ///     let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
+    /// }
+    ///
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_entry(self, value: V) -> (K, V) {
+        let entry = unsafe { self.elem.as_mut() };
+
+        let old_key = mem::replace(&mut entry.0, self.key.unwrap());
+        let old_value = mem::replace(&mut entry.1, value);
+
+        (old_key, old_value)
+    }
+
+    /// Replaces the key in the hash map with the key used to create this entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_map::{RustcEntry, HashMap};
+    /// use std::rc::Rc;
+    ///
+    /// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
+    /// let mut known_strings: Vec<Rc<String>> = Vec::new();
+    ///
+    /// // Initialise known strings, run program, etc.
+    ///
+    /// reclaim_memory(&mut map, &known_strings);
+    ///
+    /// fn reclaim_memory(map: &mut HashMap<Rc<String>, u32>, known_strings: &[Rc<String>]) {
+    ///     for s in known_strings {
+    ///         if let RustcEntry::Occupied(entry) = map.rustc_entry(s.clone()) {
+    ///             // Replaces the entry's key with our version of it in `known_strings`.
+    ///             entry.replace_key();
+    ///         }
+    ///     }
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace_key(self) -> K {
+        let entry = unsafe { self.elem.as_mut() };
+        mem::replace(&mut entry.0, self.key.unwrap())
+    }
+}
+
+impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> {
+    /// Gets a reference to the key that would be used when inserting a value
+    /// through the `RustcVacantEntry`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    /// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn key(&self) -> &K {
+        &self.key
+    }
+
+    /// Take ownership of the key.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::RustcEntry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
+    ///     v.into_key();
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_key(self) -> K {
+        self.key
+    }
+
+    /// Sets the value of the entry with the RustcVacantEntry's key,
+    /// and returns a mutable reference to it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::RustcEntry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") {
+    ///     o.insert(37);
+    /// }
+    /// assert_eq!(map["poneyland"], 37);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self, value: V) -> &'a mut V {
+        unsafe {
+            let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
+            &mut bucket.as_mut().1
+        }
+    }
+
+    /// Sets the value of the entry with the RustcVacantEntry's key,
+    /// and returns a RustcOccupiedEntry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashMap;
+    /// use hashbrown::hash_map::RustcEntry;
+    ///
+    /// let mut map: HashMap<&str, u32> = HashMap::new();
+    ///
+    /// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
+    ///     let o = v.insert_entry(37);
+    ///     assert_eq!(o.get(), &37);
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V, A> {
+        let bucket = unsafe { self.table.insert_no_grow(self.hash, (self.key, value)) };
+        RustcOccupiedEntry {
+            key: None,
+            elem: bucket,
+            table: self.table,
+        }
+    }
+}
+
+impl<K, V> IterMut<'_, K, V> {
+    /// Returns an iterator of references over the remaining items.
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn rustc_iter(&self) -> Iter<'_, K, V> {
+        self.iter()
+    }
+}
+
+impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
+    /// Returns an iterator of references over the remaining items.
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn rustc_iter(&self) -> Iter<'_, K, V> {
+        self.iter()
+    }
+}
+
+impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> {
+    /// Returns an iterator of references over the remaining items.
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn rustc_iter(&self) -> Iter<'_, K, V> {
+        self.iter()
+    }
+}
diff --git a/vendor/hashbrown-0.12.3/src/scopeguard.rs b/vendor/hashbrown-0.12.3/src/scopeguard.rs
new file mode 100644
index 0000000..f85e6ab
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/src/scopeguard.rs
@@ -0,0 +1,83 @@
+// Extracted from the scopeguard crate
+use core::{
+    mem,
+    ops::{Deref, DerefMut},
+    ptr,
+};
+
+pub struct ScopeGuard<T, F>
+where
+    F: FnMut(&mut T),
+{
+    dropfn: F,
+    value: T,
+}
+
+#[inline]
+pub fn guard<T, F>(value: T, dropfn: F) -> ScopeGuard<T, F>
+where
+    F: FnMut(&mut T),
+{
+    ScopeGuard { dropfn, value }
+}
+
+impl<T, F> ScopeGuard<T, F>
+where
+    F: FnMut(&mut T),
+{
+    #[inline]
+    pub fn into_inner(guard: Self) -> T {
+        // Cannot move out of Drop-implementing types, so
+        // ptr::read the value and forget the guard.
+        unsafe {
+            let value = ptr::read(&guard.value);
+            // read the closure so that it is dropped, and assign it to a local
+            // variable to ensure that it is only dropped after the guard has
+            // been forgotten. (In case the Drop impl of the closure, or that
+            // of any consumed captured variable, panics).
+            let _dropfn = ptr::read(&guard.dropfn);
+            mem::forget(guard);
+            value
+        }
+    }
+}
+
+impl<T, F> Deref for ScopeGuard<T, F>
+where
+    F: FnMut(&mut T),
+{
+    type Target = T;
+    #[inline]
+    fn deref(&self) -> &T {
+        &self.value
+    }
+}
+
+impl<T, F> DerefMut for ScopeGuard<T, F>
+where
+    F: FnMut(&mut T),
+{
+    #[inline]
+    fn deref_mut(&mut self) -> &mut T {
+        &mut self.value
+    }
+}
+
+impl<T, F> Drop for ScopeGuard<T, F>
+where
+    F: FnMut(&mut T),
+{
+    #[inline]
+    fn drop(&mut self) {
+        (self.dropfn)(&mut self.value);
+    }
+}
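+
+// A minimal usage sketch (illustrative comment, not part of the upstream
+// scopeguard crate): the guard derefs to the wrapped value and runs `dropfn`
+// when dropped, unless it is defused with `into_inner`:
+//
+//     let g = guard(vec![1, 2, 3], |v| v.clear());
+//     assert_eq!(g.len(), 3);            // `Deref` passes through to the Vec.
+//     let v = ScopeGuard::into_inner(g); // defused: `dropfn` never runs
+//     assert_eq!(v, vec![1, 2, 3]);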
diff --git a/vendor/hashbrown-0.12.3/src/set.rs b/vendor/hashbrown-0.12.3/src/set.rs
new file mode 100644
index 0000000..2a4dcea
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/src/set.rs
@@ -0,0 +1,2830 @@
+use crate::TryReserveError;
+use alloc::borrow::ToOwned;
+use core::borrow::Borrow;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::iter::{Chain, FromIterator, FusedIterator};
+use core::mem;
+use core::ops::{BitAnd, BitOr, BitXor, Sub};
+
+use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys};
+use crate::raw::{Allocator, Global};
+
+// Future Optimization (FIXME!)
+// =============================
+//
+// Iteration over zero sized values is a noop. There is no need
+// for `bucket.val` in the case of HashSet. I suppose we would need HKT
+// to get rid of it properly.
+
+/// A hash set implemented as a `HashMap` where the value is `()`.
+///
+/// As with the [`HashMap`] type, a `HashSet` requires that the elements
+/// implement the [`Eq`] and [`Hash`] traits. This can frequently be achieved by
+/// using `#[derive(PartialEq, Eq, Hash)]`. If you implement these yourself,
+/// it is important that the following property holds:
+///
+/// ```text
+/// k1 == k2 -> hash(k1) == hash(k2)
+/// ```
+///
+/// In other words, if two keys are equal, their hashes must be equal.
+///
+///
+/// It is a logic error for an item to be modified in such a way that the
+/// item's hash, as determined by the [`Hash`] trait, or its equality, as
+/// determined by the [`Eq`] trait, changes while it is in the set. This is
+/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or
+/// unsafe code.
+///
+/// It is also a logic error for the [`Hash`] implementation of a key to panic.
+/// This is generally only possible if the trait is implemented manually. If a
+/// panic does occur then the contents of the `HashSet` may become corrupted and
+/// some items may be dropped from the table.
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::HashSet;
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `HashSet<String>` in this example).
+/// let mut books = HashSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons".to_string());
+/// books.insert("To Kill a Mockingbird".to_string());
+/// books.insert("The Odyssey".to_string());
+/// books.insert("The Great Gatsby".to_string());
+///
+/// // Check for a specific one.
+/// if !books.contains("The Winds of Winter") {
+///     println!("We have {} books, but The Winds of Winter ain't one.",
+///              books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove("The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in &books {
+///     println!("{}", book);
+/// }
+/// ```
+///
+/// The easiest way to use `HashSet` with a custom type is to derive
+/// [`Eq`] and [`Hash`]. We must also derive [`PartialEq`]. This will in the
+/// future be implied by [`Eq`].
+///
+/// ```
+/// use hashbrown::HashSet;
+/// #[derive(Hash, Eq, PartialEq, Debug)]
+/// struct Viking {
+///     name: String,
+///     power: usize,
+/// }
+///
+/// let mut vikings = HashSet::new();
+///
+/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 });
+/// vikings.insert(Viking { name: "Einar".to_string(), power: 9 });
+/// vikings.insert(Viking { name: "Olaf".to_string(), power: 4 });
+/// vikings.insert(Viking { name: "Harald".to_string(), power: 8 });
+///
+/// // Use derived implementation to print the vikings.
+/// for x in &vikings {
+///     println!("{:?}", x);
+/// }
+/// ```
+///
+/// A `HashSet` with fixed list of elements can be initialized from an array:
+///
+/// ```
+/// use hashbrown::HashSet;
+///
+/// let viking_names: HashSet<&'static str> =
+///     [ "Einar", "Olaf", "Harald" ].iter().cloned().collect();
+/// // use the values stored in the set
+/// ```
+///
+/// [`Cell`]: https://doc.rust-lang.org/std/cell/struct.Cell.html
+/// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+/// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
+/// [`HashMap`]: struct.HashMap.html
+/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html
+/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html
+pub struct HashSet<T, S = DefaultHashBuilder, A: Allocator + Clone = Global> {
+    pub(crate) map: HashMap<T, (), S, A>,
+}
+
+impl<T: Clone, S: Clone, A: Allocator + Clone> Clone for HashSet<T, S, A> {
+    fn clone(&self) -> Self {
+        HashSet {
+            map: self.map.clone(),
+        }
+    }
+
+    fn clone_from(&mut self, source: &Self) {
+        self.map.clone_from(&source.map);
+    }
+}
+
+#[cfg(feature = "ahash")]
+impl<T> HashSet<T, DefaultHashBuilder> {
+    /// Creates an empty `HashSet`.
+    ///
+    /// The hash set is initially created with a capacity of 0, so it will not allocate until it
+    /// is first inserted into.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let set: HashSet<i32> = HashSet::new();
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn new() -> Self {
+        Self {
+            map: HashMap::new(),
+        }
+    }
+
+    /// Creates an empty `HashSet` with the specified capacity.
+    ///
+    /// The hash set will be able to hold at least `capacity` elements without
+    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let set: HashSet<i32> = HashSet::with_capacity(10);
+    /// assert!(set.capacity() >= 10);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn with_capacity(capacity: usize) -> Self {
+        Self {
+            map: HashMap::with_capacity(capacity),
+        }
+    }
+}
+
+#[cfg(feature = "ahash")]
+impl<T, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> {
+    /// Creates an empty `HashSet`.
+    ///
+    /// The hash set is initially created with a capacity of 0, so it will not allocate until it
+    /// is first inserted into.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let set: HashSet<i32> = HashSet::new();
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn new_in(alloc: A) -> Self {
+        Self {
+            map: HashMap::new_in(alloc),
+        }
+    }
+
+    /// Creates an empty `HashSet` with the specified capacity.
+    ///
+    /// The hash set will be able to hold at least `capacity` elements without
+    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let set: HashSet<i32> = HashSet::with_capacity(10);
+    /// assert!(set.capacity() >= 10);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+        Self {
+            map: HashMap::with_capacity_in(capacity, alloc),
+        }
+    }
+}
+
+impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
+    /// Returns the number of elements the set can hold without reallocating.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let set: HashSet<i32> = HashSet::with_capacity(100);
+    /// assert!(set.capacity() >= 100);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn capacity(&self) -> usize {
+        self.map.capacity()
+    }
+
+    /// An iterator visiting all elements in arbitrary order.
+    /// The iterator element type is `&'a T`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let mut set = HashSet::new();
+    /// set.insert("a");
+    /// set.insert("b");
+    ///
+    /// // Will print in an arbitrary order.
+    /// for x in set.iter() {
+    ///     println!("{}", x);
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn iter(&self) -> Iter<'_, T> {
+        Iter {
+            iter: self.map.keys(),
+        }
+    }
+
+    /// Returns the number of elements in the set.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut v = HashSet::new();
+    /// assert_eq!(v.len(), 0);
+    /// v.insert(1);
+    /// assert_eq!(v.len(), 1);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn len(&self) -> usize {
+        self.map.len()
+    }
+
+    /// Returns `true` if the set contains no elements.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut v = HashSet::new();
+    /// assert!(v.is_empty());
+    /// v.insert(1);
+    /// assert!(!v.is_empty());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn is_empty(&self) -> bool {
+        self.map.is_empty()
+    }
+
+    /// Clears the set, returning all elements in an iterator.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// assert!(!set.is_empty());
+    ///
+    /// // print 1, 2, 3 in an arbitrary order
+    /// for i in set.drain() {
+    ///     println!("{}", i);
+    /// }
+    ///
+    /// assert!(set.is_empty());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn drain(&mut self) -> Drain<'_, T, A> {
+        Drain {
+            iter: self.map.drain(),
+        }
+    }
+
+    /// Retains only the elements specified by the predicate.
+    ///
+    /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let xs = [1,2,3,4,5,6];
+    /// let mut set: HashSet<i32> = xs.iter().cloned().collect();
+    /// set.retain(|&k| k % 2 == 0);
+    /// assert_eq!(set.len(), 3);
+    /// ```
+    pub fn retain<F>(&mut self, mut f: F)
+    where
+        F: FnMut(&T) -> bool,
+    {
+        self.map.retain(|k, _| f(k));
+    }
+
+    /// Drains elements which are true under the given predicate,
+    /// and returns an iterator over the removed items.
+    ///
+    /// In other words, move all elements `e` such that `f(&e)` returns `true` out
+    /// into another iterator.
+    ///
+    /// When the returned `DrainFilter` is dropped, any remaining elements that satisfy
+    /// the predicate are dropped from the set.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<i32> = (0..8).collect();
+    /// let drained: HashSet<i32> = set.drain_filter(|v| v % 2 == 0).collect();
+    ///
+    /// let mut evens = drained.into_iter().collect::<Vec<_>>();
+    /// let mut odds = set.into_iter().collect::<Vec<_>>();
+    /// evens.sort();
+    /// odds.sort();
+    ///
+    /// assert_eq!(evens, vec![0, 2, 4, 6]);
+    /// assert_eq!(odds, vec![1, 3, 5, 7]);
+    /// ```
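+    ///
+    /// Dropping the iterator early still removes the matching elements (a
+    /// short illustrative addition for the drop behaviour described above):
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<i32> = (0..8).collect();
+    /// drop(set.drain_filter(|v| v % 2 == 0));
+    /// assert_eq!(set.len(), 4); // only the odd values remain
+    /// ```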
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn drain_filter<F>(&mut self, f: F) -> DrainFilter<'_, T, F, A>
+    where
+        F: FnMut(&T) -> bool,
+    {
+        DrainFilter {
+            f,
+            inner: DrainFilterInner {
+                iter: unsafe { self.map.table.iter() },
+                table: &mut self.map.table,
+            },
+        }
+    }
+
+    /// Clears the set, removing all values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut v = HashSet::new();
+    /// v.insert(1);
+    /// v.clear();
+    /// assert!(v.is_empty());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn clear(&mut self) {
+        self.map.clear();
+    }
+}
+
+impl<T, S> HashSet<T, S, Global> {
+    /// Creates a new empty hash set which will use the given hasher to hash
+    /// keys.
+    ///
+    /// The hash set is also created with the default initial capacity.
+    ///
+    /// Warning: `hasher` is normally randomly generated, and
+    /// is designed to allow `HashSet`s to be resistant to attacks that
+    /// cause many collisions and very poor performance. Setting it
+    /// manually using this function can expose a DoS attack vector.
+    ///
+    /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+    /// the HashSet to be useful, see its documentation for details.
+    ///
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_map::DefaultHashBuilder;
+    ///
+    /// let s = DefaultHashBuilder::default();
+    /// let mut set = HashSet::with_hasher(s);
+    /// set.insert(2);
+    /// ```
+    ///
+    /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub const fn with_hasher(hasher: S) -> Self {
+        Self {
+            map: HashMap::with_hasher(hasher),
+        }
+    }
+
+    /// Creates an empty `HashSet` with the specified capacity, using
+    /// `hasher` to hash the keys.
+    ///
+    /// The hash set will be able to hold at least `capacity` elements without
+    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    ///
+    /// Warning: `hasher` is normally randomly generated, and
+    /// is designed to allow `HashSet`s to be resistant to attacks that
+    /// cause many collisions and very poor performance. Setting it
+    /// manually using this function can expose a DoS attack vector.
+    ///
+    /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+    /// the HashSet to be useful, see its documentation for details.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_map::DefaultHashBuilder;
+    ///
+    /// let s = DefaultHashBuilder::default();
+    /// let mut set = HashSet::with_capacity_and_hasher(10, s);
+    /// set.insert(1);
+    /// ```
+    ///
+    /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
+        Self {
+            map: HashMap::with_capacity_and_hasher(capacity, hasher),
+        }
+    }
+}
+
+impl<T, S, A> HashSet<T, S, A>
+where
+    A: Allocator + Clone,
+{
+    /// Returns a reference to the underlying allocator.
+    #[inline]
+    pub fn allocator(&self) -> &A {
+        self.map.allocator()
+    }
+
+    /// Creates a new empty hash set which will use the given hasher to hash
+    /// keys.
+    ///
+    /// The hash set is also created with the default initial capacity.
+    ///
+    /// Warning: `hasher` is normally randomly generated, and
+    /// is designed to allow `HashSet`s to be resistant to attacks that
+    /// cause many collisions and very poor performance. Setting it
+    /// manually using this function can expose a DoS attack vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_map::DefaultHashBuilder;
+    ///
+    /// let s = DefaultHashBuilder::default();
+    /// let mut set = HashSet::with_hasher(s);
+    /// set.insert(2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn with_hasher_in(hasher: S, alloc: A) -> Self {
+        Self {
+            map: HashMap::with_hasher_in(hasher, alloc),
+        }
+    }
+
+    /// Creates an empty `HashSet` with the specified capacity, using
+    /// `hasher` to hash the keys.
+    ///
+    /// The hash set will be able to hold at least `capacity` elements without
+    /// reallocating. If `capacity` is 0, the hash set will not allocate.
+    ///
+    /// Warning: `hasher` is normally randomly generated, and
+    /// is designed to allow `HashSet`s to be resistant to attacks that
+    /// cause many collisions and very poor performance. Setting it
+    /// manually using this function can expose a DoS attack vector.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_map::DefaultHashBuilder;
+    ///
+    /// let s = DefaultHashBuilder::default();
+    /// let mut set = HashSet::with_capacity_and_hasher(10, s);
+    /// set.insert(1);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn with_capacity_and_hasher_in(capacity: usize, hasher: S, alloc: A) -> Self {
+        Self {
+            map: HashMap::with_capacity_and_hasher_in(capacity, hasher, alloc),
+        }
+    }
+
+    /// Returns a reference to the set's [`BuildHasher`].
+    ///
+    /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_map::DefaultHashBuilder;
+    ///
+    /// let hasher = DefaultHashBuilder::default();
+    /// let set: HashSet<i32> = HashSet::with_hasher(hasher);
+    /// let hasher: &DefaultHashBuilder = set.hasher();
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn hasher(&self) -> &S {
+        self.map.hasher()
+    }
+}
+
+impl<T, S, A> HashSet<T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    /// Reserves capacity for at least `additional` more elements to be inserted
+    /// in the `HashSet`. The collection may reserve more space to avoid
+    /// frequent reallocations.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new allocation size overflows `usize`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let mut set: HashSet<i32> = HashSet::new();
+    /// set.reserve(10);
+    /// assert!(set.capacity() >= 10);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn reserve(&mut self, additional: usize) {
+        self.map.reserve(additional);
+    }
+
+    /// Tries to reserve capacity for at least `additional` more elements to be inserted
+    /// in the given `HashSet<T>`. The collection may reserve more space to avoid
+    /// frequent reallocations.
+    ///
+    /// # Errors
+    ///
+    /// If the capacity overflows, or the allocator reports a failure, then an error
+    /// is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let mut set: HashSet<i32> = HashSet::new();
+    /// set.try_reserve(10).expect("why is the test harness OOMing on 10 bytes?");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+        self.map.try_reserve(additional)
+    }
+
+    /// Shrinks the capacity of the set as much as possible. It will drop
+    /// down as much as possible while maintaining the internal rules
+    /// and possibly leaving some space in accordance with the resize policy.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set = HashSet::with_capacity(100);
+    /// set.insert(1);
+    /// set.insert(2);
+    /// assert!(set.capacity() >= 100);
+    /// set.shrink_to_fit();
+    /// assert!(set.capacity() >= 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn shrink_to_fit(&mut self) {
+        self.map.shrink_to_fit();
+    }
+
+    /// Shrinks the capacity of the set with a lower limit. It will drop
+    /// down no lower than the supplied limit while maintaining the internal rules
+    /// and possibly leaving some space in accordance with the resize policy.
+    ///
+    /// Panics if the current capacity is smaller than the supplied
+    /// minimum capacity.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set = HashSet::with_capacity(100);
+    /// set.insert(1);
+    /// set.insert(2);
+    /// assert!(set.capacity() >= 100);
+    /// set.shrink_to(10);
+    /// assert!(set.capacity() >= 10);
+    /// set.shrink_to(0);
+    /// assert!(set.capacity() >= 2);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn shrink_to(&mut self, min_capacity: usize) {
+        self.map.shrink_to(min_capacity);
+    }
+
+    /// Visits the values representing the difference,
+    /// i.e., the values that are in `self` but not in `other`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+    ///
+    /// // Can be seen as `a - b`.
+    /// for x in a.difference(&b) {
+    ///     println!("{}", x); // Print 1
+    /// }
+    ///
+    /// let diff: HashSet<_> = a.difference(&b).collect();
+    /// assert_eq!(diff, [1].iter().collect());
+    ///
+    /// // Note that difference is not symmetric,
+    /// // and `b - a` means something else:
+    /// let diff: HashSet<_> = b.difference(&a).collect();
+    /// assert_eq!(diff, [4].iter().collect());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn difference<'a>(&'a self, other: &'a Self) -> Difference<'a, T, S, A> {
+        Difference {
+            iter: self.iter(),
+            other,
+        }
+    }
+
+    /// Visits the values representing the symmetric difference,
+    /// i.e., the values that are in `self` or in `other` but not in both.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+    ///
+    /// // Print 1, 4 in arbitrary order.
+    /// for x in a.symmetric_difference(&b) {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// let diff1: HashSet<_> = a.symmetric_difference(&b).collect();
+    /// let diff2: HashSet<_> = b.symmetric_difference(&a).collect();
+    ///
+    /// assert_eq!(diff1, diff2);
+    /// assert_eq!(diff1, [1, 4].iter().collect());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn symmetric_difference<'a>(&'a self, other: &'a Self) -> SymmetricDifference<'a, T, S, A> {
+        SymmetricDifference {
+            iter: self.difference(other).chain(other.difference(self)),
+        }
+    }
+
+    /// Visits the values representing the intersection,
+    /// i.e., the values that are both in `self` and `other`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+    ///
+    /// // Print 2, 3 in arbitrary order.
+    /// for x in a.intersection(&b) {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// let intersection: HashSet<_> = a.intersection(&b).collect();
+    /// assert_eq!(intersection, [2, 3].iter().collect());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn intersection<'a>(&'a self, other: &'a Self) -> Intersection<'a, T, S, A> {
+        let (smaller, larger) = if self.len() <= other.len() {
+            (self, other)
+        } else {
+            (other, self)
+        };
+        Intersection {
+            iter: smaller.iter(),
+            other: larger,
+        }
+    }
+
+    /// Visits the values representing the union,
+    /// i.e., all the values in `self` or `other`, without duplicates.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+    ///
+    /// // Print 1, 2, 3, 4 in arbitrary order.
+    /// for x in a.union(&b) {
+    ///     println!("{}", x);
+    /// }
+    ///
+    /// let union: HashSet<_> = a.union(&b).collect();
+    /// assert_eq!(union, [1, 2, 3, 4].iter().collect());
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn union<'a>(&'a self, other: &'a Self) -> Union<'a, T, S, A> {
+        // We'll iterate one set in full, and only the remaining difference from the other.
+        // Use the smaller set for the difference in order to reduce hash lookups.
+        let (smaller, larger) = if self.len() <= other.len() {
+            (self, other)
+        } else {
+            (other, self)
+        };
+        Union {
+            iter: larger.iter().chain(smaller.difference(larger)),
+        }
+    }
+
+    /// Returns `true` if the set contains a value.
+    ///
+    /// The value may be any borrowed form of the set's value type, but
+    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+    /// the value type.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// assert_eq!(set.contains(&1), true);
+    /// assert_eq!(set.contains(&4), false);
+    /// ```
+    ///
+    /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+    /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.map.contains_key(value)
+    }
+
+    /// Returns a reference to the value in the set, if any, that is equal to the given value.
+    ///
+    /// The value may be any borrowed form of the set's value type, but
+    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+    /// the value type.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// assert_eq!(set.get(&2), Some(&2));
+    /// assert_eq!(set.get(&4), None);
+    /// ```
+    ///
+    /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+    /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.map.get_key_value(value) {
+            Some((k, _)) => Some(k),
+            None => None,
+        }
+    }
+
+    /// Inserts the given `value` into the set if it is not present, then
+    /// returns a reference to the value in the set.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// assert_eq!(set.len(), 3);
+    /// assert_eq!(set.get_or_insert(2), &2);
+    /// assert_eq!(set.get_or_insert(100), &100);
+    /// assert_eq!(set.len(), 4); // 100 was inserted
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_or_insert(&mut self, value: T) -> &T {
+        // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
+        // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
+        self.map
+            .raw_entry_mut()
+            .from_key(&value)
+            .or_insert(value, ())
+            .0
+    }
+
+    /// Inserts an owned copy of the given `value` into the set if it is not
+    /// present, then returns a reference to the value in the set.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<String> = ["cat", "dog", "horse"]
+    ///     .iter().map(|&pet| pet.to_owned()).collect();
+    ///
+    /// assert_eq!(set.len(), 3);
+    /// for &pet in &["cat", "dog", "fish"] {
+    ///     let value = set.get_or_insert_owned(pet);
+    ///     assert_eq!(value, pet);
+    /// }
+    /// assert_eq!(set.len(), 4); // a new "fish" was inserted
+    /// ```
+    #[inline]
+    pub fn get_or_insert_owned<Q: ?Sized>(&mut self, value: &Q) -> &T
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq + ToOwned<Owned = T>,
+    {
+        // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
+        // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
+        self.map
+            .raw_entry_mut()
+            .from_key(value)
+            .or_insert_with(|| (value.to_owned(), ()))
+            .0
+    }
+
+    /// Inserts a value computed from `f` into the set if the given `value` is
+    /// not present, then returns a reference to the value in the set.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<String> = ["cat", "dog", "horse"]
+    ///     .iter().map(|&pet| pet.to_owned()).collect();
+    ///
+    /// assert_eq!(set.len(), 3);
+    /// for &pet in &["cat", "dog", "fish"] {
+    ///     let value = set.get_or_insert_with(pet, str::to_owned);
+    ///     assert_eq!(value, pet);
+    /// }
+    /// assert_eq!(set.len(), 4); // a new "fish" was inserted
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get_or_insert_with<Q: ?Sized, F>(&mut self, value: &Q, f: F) -> &T
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+        F: FnOnce(&Q) -> T,
+    {
+        // Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
+        // `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
+        self.map
+            .raw_entry_mut()
+            .from_key(value)
+            .or_insert_with(|| (f(value), ()))
+            .0
+    }
+
+    /// Gets the given value's corresponding entry in the set for in-place manipulation.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_set::Entry::*;
+    ///
+    /// let mut singles = HashSet::new();
+    /// let mut dupes = HashSet::new();
+    ///
+    /// for ch in "a short treatise on fungi".chars() {
+    ///     if let Vacant(dupe_entry) = dupes.entry(ch) {
+    ///         // We haven't already seen a duplicate, so
+    ///         // check if we've at least seen it once.
+    ///         match singles.entry(ch) {
+    ///             Vacant(single_entry) => {
+    ///                 // We found a new character for the first time.
+    ///                 single_entry.insert()
+    ///             }
+    ///             Occupied(single_entry) => {
+    ///                 // We've already seen this once, "move" it to dupes.
+    ///                 single_entry.remove();
+    ///                 dupe_entry.insert();
+    ///             }
+    ///         }
+    ///     }
+    /// }
+    ///
+    /// assert!(!singles.contains(&'t') && dupes.contains(&'t'));
+    /// assert!(singles.contains(&'u') && !dupes.contains(&'u'));
+    /// assert!(!singles.contains(&'v') && !dupes.contains(&'v'));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn entry(&mut self, value: T) -> Entry<'_, T, S, A> {
+        match self.map.entry(value) {
+            map::Entry::Occupied(entry) => Entry::Occupied(OccupiedEntry { inner: entry }),
+            map::Entry::Vacant(entry) => Entry::Vacant(VacantEntry { inner: entry }),
+        }
+    }
+
+    /// Returns `true` if `self` has no elements in common with `other`.
+    /// This is equivalent to checking for an empty intersection.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// let mut b = HashSet::new();
+    ///
+    /// assert_eq!(a.is_disjoint(&b), true);
+    /// b.insert(4);
+    /// assert_eq!(a.is_disjoint(&b), true);
+    /// b.insert(1);
+    /// assert_eq!(a.is_disjoint(&b), false);
+    /// ```
+    pub fn is_disjoint(&self, other: &Self) -> bool {
+        self.iter().all(|v| !other.contains(v))
+    }
+
+    /// Returns `true` if the set is a subset of another,
+    /// i.e., `other` contains at least all the values in `self`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let sup: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// let mut set = HashSet::new();
+    ///
+    /// assert_eq!(set.is_subset(&sup), true);
+    /// set.insert(2);
+    /// assert_eq!(set.is_subset(&sup), true);
+    /// set.insert(4);
+    /// assert_eq!(set.is_subset(&sup), false);
+    /// ```
+    pub fn is_subset(&self, other: &Self) -> bool {
+        self.len() <= other.len() && self.iter().all(|v| other.contains(v))
+    }
+
+    /// Returns `true` if the set is a superset of another,
+    /// i.e., `self` contains at least all the values in `other`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let sub: HashSet<_> = [1, 2].iter().cloned().collect();
+    /// let mut set = HashSet::new();
+    ///
+    /// assert_eq!(set.is_superset(&sub), false);
+    ///
+    /// set.insert(0);
+    /// set.insert(1);
+    /// assert_eq!(set.is_superset(&sub), false);
+    ///
+    /// set.insert(2);
+    /// assert_eq!(set.is_superset(&sub), true);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn is_superset(&self, other: &Self) -> bool {
+        other.is_subset(self)
+    }
+
+    /// Adds a value to the set.
+    ///
+    /// If the set did not have this value present, `true` is returned.
+    ///
+    /// If the set did have this value present, `false` is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set = HashSet::new();
+    ///
+    /// assert_eq!(set.insert(2), true);
+    /// assert_eq!(set.insert(2), false);
+    /// assert_eq!(set.len(), 1);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(&mut self, value: T) -> bool {
+        self.map.insert(value, ()).is_none()
+    }
+
+    /// Inserts a value into the set without checking if the value already exists in the set.
+    ///
+    /// Returns a reference to the value just inserted.
+    ///
+    /// This operation is safe if a value does not exist in the set.
+    ///
+    /// However, if a value exists in the set already, the behavior is unspecified:
+    /// this operation may panic, loop forever, or any following operation with the set
+    /// may panic, loop forever, or return an arbitrary result.
+    ///
+    /// That said, this operation (and following operations) are guaranteed to
+    /// not violate memory safety.
+    ///
+    /// This operation is faster than regular insert, because it does not perform
+    /// lookup before insertion.
+    ///
+    /// This operation is useful during initial population of the set.
+    /// For example, when constructing a set from another set, we know
+    /// that values are unique.
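+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of the intended use (illustrative addition, not from
+    /// the upstream docs): populating a fresh set from values that are known
+    /// to be distinct.
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set = HashSet::new();
+    /// for v in 0..3 {
+    ///     // OK: each value occurs exactly once.
+    ///     set.insert_unique_unchecked(v);
+    /// }
+    /// assert_eq!(set.len(), 3);
+    /// ```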
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert_unique_unchecked(&mut self, value: T) -> &T {
+        self.map.insert_unique_unchecked(value, ()).0
+    }
+
+    /// Adds a value to the set, replacing the existing value, if any, that is equal to the given
+    /// one. Returns the replaced value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set = HashSet::new();
+    /// set.insert(Vec::<i32>::new());
+    ///
+    /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0);
+    /// set.replace(Vec::with_capacity(10));
+    /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace(&mut self, value: T) -> Option<T> {
+        match self.map.entry(value) {
+            map::Entry::Occupied(occupied) => Some(occupied.replace_key()),
+            map::Entry::Vacant(vacant) => {
+                vacant.insert(());
+                None
+            }
+        }
+    }
+
+    /// Removes a value from the set. Returns whether the value was
+    /// present in the set.
+    ///
+    /// The value may be any borrowed form of the set's value type, but
+    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+    /// the value type.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set = HashSet::new();
+    ///
+    /// set.insert(2);
+    /// assert_eq!(set.remove(&2), true);
+    /// assert_eq!(set.remove(&2), false);
+    /// ```
+    ///
+    /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+    /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        self.map.remove(value).is_some()
+    }
+
+    /// Removes and returns the value in the set, if any, that is equal to the given one.
+    ///
+    /// The value may be any borrowed form of the set's value type, but
+    /// [`Hash`] and [`Eq`] on the borrowed form *must* match those for
+    /// the value type.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+    /// assert_eq!(set.take(&2), Some(2));
+    /// assert_eq!(set.take(&2), None);
+    /// ```
+    ///
+    /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+    /// [`Hash`]: https://doc.rust-lang.org/std/hash/trait.Hash.html
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+    where
+        T: Borrow<Q>,
+        Q: Hash + Eq,
+    {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.map.remove_entry(value) {
+            Some((k, _)) => Some(k),
+            None => None,
+        }
+    }
+}
+
+impl<T, S, A> PartialEq for HashSet<T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    fn eq(&self, other: &Self) -> bool {
+        if self.len() != other.len() {
+            return false;
+        }
+
+        self.iter().all(|key| other.contains(key))
+    }
+}
+
+impl<T, S, A> Eq for HashSet<T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+}
+
+impl<T, S, A> fmt::Debug for HashSet<T, S, A>
+where
+    T: fmt::Debug,
+    A: Allocator + Clone,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_set().entries(self.iter()).finish()
+    }
+}
+
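+/// Conversion from a `HashMap<T, ()>` (doc example added as an illustrative
+/// sketch; the impl itself is unchanged):
+///
+/// ```
+/// use hashbrown::{HashMap, HashSet};
+///
+/// let mut map: HashMap<i32, ()> = HashMap::new();
+/// map.insert(1, ());
+///
+/// let set: HashSet<i32> = HashSet::from(map);
+/// assert!(set.contains(&1));
+/// ```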
+impl<T, S, A> From<HashMap<T, (), S, A>> for HashSet<T, S, A>
+where
+    A: Allocator + Clone,
+{
+    fn from(map: HashMap<T, (), S, A>) -> Self {
+        Self { map }
+    }
+}
+
+impl<T, S, A> FromIterator<T> for HashSet<T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher + Default,
+    A: Default + Allocator + Clone,
+{
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
+        let mut set = Self::with_hasher_in(Default::default(), Default::default());
+        set.extend(iter);
+        set
+    }
+}
+
+// The default hasher is used to match the std implementation signature
+#[cfg(feature = "ahash")]
+impl<T, A, const N: usize> From<[T; N]> for HashSet<T, DefaultHashBuilder, A>
+where
+    T: Eq + Hash,
+    A: Default + Allocator + Clone,
+{
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let set1 = HashSet::from([1, 2, 3, 4]);
+    /// let set2: HashSet<_> = [1, 2, 3, 4].into();
+    /// assert_eq!(set1, set2);
+    /// ```
+    fn from(arr: [T; N]) -> Self {
+        arr.into_iter().collect()
+    }
+}
+
+impl<T, S, A> Extend<T> for HashSet<T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+        self.map.extend(iter.into_iter().map(|k| (k, ())));
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_one(&mut self, k: T) {
+        self.map.insert(k, ());
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_reserve(&mut self, additional: usize) {
+        Extend::<(T, ())>::extend_reserve(&mut self.map, additional);
+    }
+}
+
+impl<'a, T, S, A> Extend<&'a T> for HashSet<T, S, A>
+where
+    T: 'a + Eq + Hash + Copy,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+        self.extend(iter.into_iter().copied());
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_one(&mut self, k: &'a T) {
+        self.map.insert(*k, ());
+    }
+
+    #[inline]
+    #[cfg(feature = "nightly")]
+    fn extend_reserve(&mut self, additional: usize) {
+        Extend::<(T, ())>::extend_reserve(&mut self.map, additional);
+    }
+}
+
+impl<T, S, A> Default for HashSet<T, S, A>
+where
+    S: Default,
+    A: Default + Allocator + Clone,
+{
+    /// Creates an empty `HashSet<T, S>` with the `Default` value for the hasher.
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn default() -> Self {
+        Self {
+            map: HashMap::default(),
+        }
+    }
+}
+
+impl<T, S, A> BitOr<&HashSet<T, S, A>> for &HashSet<T, S, A>
+where
+    T: Eq + Hash + Clone,
+    S: BuildHasher + Default,
+    A: Allocator + Clone,
+{
+    type Output = HashSet<T, S>;
+
+    /// Returns the union of `self` and `rhs` as a new `HashSet<T, S>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect();
+    /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect();
+    ///
+    /// let set = &a | &b;
+    ///
+    /// let mut i = 0;
+    /// let expected = [1, 2, 3, 4, 5];
+    /// for x in &set {
+    ///     assert!(expected.contains(x));
+    ///     i += 1;
+    /// }
+    /// assert_eq!(i, expected.len());
+    /// ```
+    fn bitor(self, rhs: &HashSet<T, S, A>) -> HashSet<T, S> {
+        self.union(rhs).cloned().collect()
+    }
+}
+
+impl<T, S, A> BitAnd<&HashSet<T, S, A>> for &HashSet<T, S, A>
+where
+    T: Eq + Hash + Clone,
+    S: BuildHasher + Default,
+    A: Allocator + Clone,
+{
+    type Output = HashSet<T, S>;
+
+    /// Returns the intersection of `self` and `rhs` as a new `HashSet<T, S>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect();
+    /// let b: HashSet<_> = vec![2, 3, 4].into_iter().collect();
+    ///
+    /// let set = &a & &b;
+    ///
+    /// let mut i = 0;
+    /// let expected = [2, 3];
+    /// for x in &set {
+    ///     assert!(expected.contains(x));
+    ///     i += 1;
+    /// }
+    /// assert_eq!(i, expected.len());
+    /// ```
+    fn bitand(self, rhs: &HashSet<T, S, A>) -> HashSet<T, S> {
+        self.intersection(rhs).cloned().collect()
+    }
+}
+
+impl<T, S> BitXor<&HashSet<T, S>> for &HashSet<T, S>
+where
+    T: Eq + Hash + Clone,
+    S: BuildHasher + Default,
+{
+    type Output = HashSet<T, S>;
+
+    /// Returns the symmetric difference of `self` and `rhs` as a new `HashSet<T, S>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect();
+    /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect();
+    ///
+    /// let set = &a ^ &b;
+    ///
+    /// let mut i = 0;
+    /// let expected = [1, 2, 4, 5];
+    /// for x in &set {
+    ///     assert!(expected.contains(x));
+    ///     i += 1;
+    /// }
+    /// assert_eq!(i, expected.len());
+    /// ```
+    fn bitxor(self, rhs: &HashSet<T, S>) -> HashSet<T, S> {
+        self.symmetric_difference(rhs).cloned().collect()
+    }
+}
+
+impl<T, S> Sub<&HashSet<T, S>> for &HashSet<T, S>
+where
+    T: Eq + Hash + Clone,
+    S: BuildHasher + Default,
+{
+    type Output = HashSet<T, S>;
+
+    /// Returns the difference of `self` and `rhs` as a new `HashSet<T, S>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let a: HashSet<_> = vec![1, 2, 3].into_iter().collect();
+    /// let b: HashSet<_> = vec![3, 4, 5].into_iter().collect();
+    ///
+    /// let set = &a - &b;
+    ///
+    /// let mut i = 0;
+    /// let expected = [1, 2];
+    /// for x in &set {
+    ///     assert!(expected.contains(x));
+    ///     i += 1;
+    /// }
+    /// assert_eq!(i, expected.len());
+    /// ```
+    fn sub(self, rhs: &HashSet<T, S>) -> HashSet<T, S> {
+        self.difference(rhs).cloned().collect()
+    }
+}
+
+/// An iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`iter`]: struct.HashSet.html#method.iter
+pub struct Iter<'a, K> {
+    iter: Keys<'a, K, ()>,
+}
+
+/// An owning iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashSet`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`into_iter`]: struct.HashSet.html#method.into_iter
+pub struct IntoIter<K, A: Allocator + Clone = Global> {
+    iter: map::IntoIter<K, (), A>,
+}
+
+/// A draining iterator over the items of a `HashSet`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`drain`]: struct.HashSet.html#method.drain
+pub struct Drain<'a, K, A: Allocator + Clone = Global> {
+    iter: map::Drain<'a, K, (), A>,
+}
+
+/// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`.
+///
+/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its
+/// documentation for more.
+///
+/// [`drain_filter`]: struct.HashSet.html#method.drain_filter
+/// [`HashSet`]: struct.HashSet.html
+pub struct DrainFilter<'a, K, F, A: Allocator + Clone = Global>
+where
+    F: FnMut(&K) -> bool,
+{
+    f: F,
+    inner: DrainFilterInner<'a, K, (), A>,
+}
+
+/// A lazy iterator producing elements in the intersection of `HashSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`intersection`]: struct.HashSet.html#method.intersection
+pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> {
+    // iterator of the first set
+    iter: Iter<'a, T>,
+    // the second set
+    other: &'a HashSet<T, S, A>,
+}
+
+/// A lazy iterator producing elements in the difference of `HashSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`difference`]: struct.HashSet.html#method.difference
+pub struct Difference<'a, T, S, A: Allocator + Clone = Global> {
+    // iterator of the first set
+    iter: Iter<'a, T>,
+    // the second set
+    other: &'a HashSet<T, S, A>,
+}
+
+/// A lazy iterator producing elements in the symmetric difference of `HashSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`HashSet`]. See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference
+pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> {
+    iter: Chain<Difference<'a, T, S, A>, Difference<'a, T, S, A>>,
+}
+
+/// A lazy iterator producing elements in the union of `HashSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`HashSet`].
+/// See its documentation for more.
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`union`]: struct.HashSet.html#method.union
+pub struct Union<'a, T, S, A: Allocator + Clone = Global> {
+    iter: Chain<Iter<'a, T>, Difference<'a, T, S, A>>,
+}
+
+impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet<T, S, A> {
+    type Item = &'a T;
+    type IntoIter = Iter<'a, T>;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_iter(self) -> Iter<'a, T> {
+        self.iter()
+    }
+}
+
+impl<T, S, A: Allocator + Clone> IntoIterator for HashSet<T, S, A> {
+    type Item = T;
+    type IntoIter = IntoIter<T, A>;
+
+    /// Creates a consuming iterator, that is, one that moves each value out
+    /// of the set in arbitrary order. The set cannot be used after calling
+    /// this.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// let mut set = HashSet::new();
+    /// set.insert("a".to_string());
+    /// set.insert("b".to_string());
+    ///
+    /// // Not possible to collect to a Vec<String> with a regular `.iter()`.
+    /// let v: Vec<String> = set.into_iter().collect();
+    ///
+    /// // Will print in an arbitrary order.
+    /// for x in &v {
+    ///     println!("{}", x);
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn into_iter(self) -> IntoIter<T, A> {
+        IntoIter {
+            iter: self.map.into_iter(),
+        }
+    }
+}
+
+impl<K> Clone for Iter<'_, K> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Self {
+        Iter {
+            iter: self.iter.clone(),
+        }
+    }
+}
+impl<'a, K> Iterator for Iter<'a, K> {
+    type Item = &'a K;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a K> {
+        self.iter.next()
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+impl<'a, K> ExactSizeIterator for Iter<'a, K> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+impl<K> FusedIterator for Iter<'_, K> {}
+
+impl<K: fmt::Debug> fmt::Debug for Iter<'_, K> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+impl<K, A: Allocator + Clone> Iterator for IntoIter<K, A> {
+    type Item = K;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<K> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.iter.next() {
+            Some((k, _)) => Some(k),
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+impl<K, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+impl<K, A: Allocator + Clone> FusedIterator for IntoIter<K, A> {}
+
+impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for IntoIter<K, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let entries_iter = self.iter.iter().map(|(k, _)| k);
+        f.debug_list().entries(entries_iter).finish()
+    }
+}
+
+impl<K, A: Allocator + Clone> Iterator for Drain<'_, K, A> {
+    type Item = K;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<K> {
+        // Avoid `Option::map` because it bloats LLVM IR.
+        match self.iter.next() {
+            Some((k, _)) => Some(k),
+            None => None,
+        }
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+impl<K, A: Allocator + Clone> ExactSizeIterator for Drain<'_, K, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn len(&self) -> usize {
+        self.iter.len()
+    }
+}
+impl<K, A: Allocator + Clone> FusedIterator for Drain<'_, K, A> {}
+
+impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for Drain<'_, K, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let entries_iter = self.iter.iter().map(|(k, _)| k);
+        f.debug_list().entries(entries_iter).finish()
+    }
+}
+
+impl<'a, K, F, A: Allocator + Clone> Drop for DrainFilter<'a, K, F, A>
+where
+    F: FnMut(&K) -> bool,
+{
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn drop(&mut self) {
+        while let Some(item) = self.next() {
+            let guard = ConsumeAllOnDrop(self);
+            drop(item);
+            mem::forget(guard);
+        }
+    }
+}
+
+impl<K, F, A: Allocator + Clone> Iterator for DrainFilter<'_, K, F, A>
+where
+    F: FnMut(&K) -> bool,
+{
+    type Item = K;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<K> {
+        let f = &mut self.f;
+        let (k, _) = self.inner.next(&mut |k, _| f(k))?;
+        Some(k)
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, self.inner.iter.size_hint().1)
+    }
+}
+
+impl<K, F, A: Allocator + Clone> FusedIterator for DrainFilter<'_, K, F, A> where
+    F: FnMut(&K) -> bool
+{
+}
+
+impl<T, S, A: Allocator + Clone> Clone for Intersection<'_, T, S, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Self {
+        Intersection {
+            iter: self.iter.clone(),
+            ..*self
+        }
+    }
+}
+
+impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    type Item = &'a T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a T> {
+        loop {
+            let elt = self.iter.next()?;
+            if self.other.contains(elt) {
+                return Some(elt);
+            }
+        }
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (_, upper) = self.iter.size_hint();
+        (0, upper)
+    }
+}
+
+impl<T, S, A> fmt::Debug for Intersection<'_, T, S, A>
+where
+    T: fmt::Debug + Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+impl<T, S, A> FusedIterator for Intersection<'_, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+}
+
+impl<T, S, A: Allocator + Clone> Clone for Difference<'_, T, S, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Self {
+        Difference {
+            iter: self.iter.clone(),
+            ..*self
+        }
+    }
+}
+
+impl<'a, T, S, A> Iterator for Difference<'a, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    type Item = &'a T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a T> {
+        loop {
+            let elt = self.iter.next()?;
+            if !self.other.contains(elt) {
+                return Some(elt);
+            }
+        }
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let (_, upper) = self.iter.size_hint();
+        (0, upper)
+    }
+}
+
+impl<T, S, A> FusedIterator for Difference<'_, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+}
+
+impl<T, S, A> fmt::Debug for Difference<'_, T, S, A>
+where
+    T: fmt::Debug + Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+impl<T, S, A: Allocator + Clone> Clone for SymmetricDifference<'_, T, S, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Self {
+        SymmetricDifference {
+            iter: self.iter.clone(),
+        }
+    }
+}
+
+impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    type Item = &'a T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a T> {
+        self.iter.next()
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+impl<T, S, A> FusedIterator for SymmetricDifference<'_, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+}
+
+impl<T, S, A> fmt::Debug for SymmetricDifference<'_, T, S, A>
+where
+    T: fmt::Debug + Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+impl<T, S, A: Allocator + Clone> Clone for Union<'_, T, S, A> {
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn clone(&self) -> Self {
+        Union {
+            iter: self.iter.clone(),
+        }
+    }
+}
+
+impl<T, S, A> FusedIterator for Union<'_, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+}
+
+impl<T, S, A> fmt::Debug for Union<'_, T, S, A>
+where
+    T: fmt::Debug + Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.clone()).finish()
+    }
+}
+
+impl<'a, T, S, A> Iterator for Union<'a, T, S, A>
+where
+    T: Eq + Hash,
+    S: BuildHasher,
+    A: Allocator + Clone,
+{
+    type Item = &'a T;
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn next(&mut self) -> Option<&'a T> {
+        self.iter.next()
+    }
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.iter.size_hint()
+    }
+}
+
+/// A view into a single entry in a set, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashSet`].
+///
+/// [`HashSet`]: struct.HashSet.html
+/// [`entry`]: struct.HashSet.html#method.entry
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_set::{Entry, HashSet, OccupiedEntry};
+///
+/// let mut set = HashSet::new();
+/// set.extend(["a", "b", "c"]);
+/// assert_eq!(set.len(), 3);
+///
+/// // Existing value (insert)
+/// let entry: Entry<_, _> = set.entry("a");
+/// let _raw_o: OccupiedEntry<_, _> = entry.insert();
+/// assert_eq!(set.len(), 3);
+/// // Nonexistent value (insert)
+/// set.entry("d").insert();
+///
+/// // Existing value (or_insert)
+/// set.entry("b").or_insert();
+/// // Nonexistent value (or_insert)
+/// set.entry("e").or_insert();
+///
+/// println!("Our HashSet: {:?}", set);
+///
+/// let mut vec: Vec<_> = set.iter().copied().collect();
+/// // The `Iter` iterator produces items in arbitrary order, so the
+/// // items must be sorted to test them against a sorted array.
+/// vec.sort_unstable();
+/// assert_eq!(vec, ["a", "b", "c", "d", "e"]);
+/// ```
+pub enum Entry<'a, T, S, A = Global>
+where
+    A: Allocator + Clone,
+{
+    /// An occupied entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_set::{Entry, HashSet};
+    /// let mut set: HashSet<_> = ["a", "b"].into();
+    ///
+    /// match set.entry("a") {
+    ///     Entry::Vacant(_) => unreachable!(),
+    ///     Entry::Occupied(_) => { }
+    /// }
+    /// ```
+    Occupied(OccupiedEntry<'a, T, S, A>),
+
+    /// A vacant entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_set::{Entry, HashSet};
+    /// let mut set: HashSet<&str> = HashSet::new();
+    ///
+    /// match set.entry("a") {
+    ///     Entry::Occupied(_) => unreachable!(),
+    ///     Entry::Vacant(_) => { }
+    /// }
+    /// ```
+    Vacant(VacantEntry<'a, T, S, A>),
+}
+
+impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for Entry<'_, T, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match *self {
+            Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+            Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+        }
+    }
+}
+
+/// A view into an occupied entry in a `HashSet`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_set::{Entry, HashSet, OccupiedEntry};
+///
+/// let mut set = HashSet::new();
+/// set.extend(["a", "b", "c"]);
+///
+/// let _entry_o: OccupiedEntry<_, _> = set.entry("a").insert();
+/// assert_eq!(set.len(), 3);
+///
+/// // Existing key
+/// match set.entry("a") {
+///     Entry::Vacant(_) => unreachable!(),
+///     Entry::Occupied(view) => {
+///         assert_eq!(view.get(), &"a");
+///     }
+/// }
+///
+/// assert_eq!(set.len(), 3);
+///
+/// // Existing key (take)
+/// match set.entry("c") {
+///     Entry::Vacant(_) => unreachable!(),
+///     Entry::Occupied(view) => {
+///         assert_eq!(view.remove(), "c");
+///     }
+/// }
+/// assert_eq!(set.get(&"c"), None);
+/// assert_eq!(set.len(), 2);
+/// ```
+pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> {
+    inner: map::OccupiedEntry<'a, T, (), S, A>,
+}
+
+impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for OccupiedEntry<'_, T, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("OccupiedEntry")
+            .field("value", self.get())
+            .finish()
+    }
+}
+
+/// A view into a vacant entry in a `HashSet`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// use hashbrown::hash_set::{Entry, HashSet, VacantEntry};
+///
+/// let mut set = HashSet::<&str>::new();
+///
+/// let entry_v: VacantEntry<_, _> = match set.entry("a") {
+///     Entry::Vacant(view) => view,
+///     Entry::Occupied(_) => unreachable!(),
+/// };
+/// entry_v.insert();
+/// assert!(set.contains("a") && set.len() == 1);
+///
+/// // Nonexistent key (insert)
+/// match set.entry("b") {
+///     Entry::Vacant(view) => view.insert(),
+///     Entry::Occupied(_) => unreachable!(),
+/// }
+/// assert!(set.contains("b") && set.len() == 2);
+/// ```
+pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> {
+    inner: map::VacantEntry<'a, T, (), S, A>,
+}
+
+impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for VacantEntry<'_, T, S, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("VacantEntry").field(self.get()).finish()
+    }
+}
+
+impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> {
+    /// Sets the value of the entry, and returns an OccupiedEntry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    /// let entry = set.entry("horseyland").insert();
+    ///
+    /// assert_eq!(entry.get(), &"horseyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self) -> OccupiedEntry<'a, T, S, A>
+    where
+        T: Hash,
+        S: BuildHasher,
+    {
+        match self {
+            Entry::Occupied(entry) => entry,
+            Entry::Vacant(entry) => entry.insert_entry(),
+        }
+    }
+
+    /// Ensures a value is in the entry by inserting if it was vacant.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    ///
+    /// // nonexistent key
+    /// set.entry("poneyland").or_insert();
+    /// assert!(set.contains("poneyland"));
+    ///
+    /// // existing key
+    /// set.entry("poneyland").or_insert();
+    /// assert!(set.contains("poneyland"));
+    /// assert_eq!(set.len(), 1);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn or_insert(self)
+    where
+        T: Hash,
+        S: BuildHasher,
+    {
+        if let Entry::Vacant(entry) = self {
+            entry.insert();
+        }
+    }
+
+    /// Returns a reference to this entry's value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    /// set.entry("poneyland").or_insert();
+    /// // existing key
+    /// assert_eq!(set.entry("poneyland").get(), &"poneyland");
+    /// // nonexistent key
+    /// assert_eq!(set.entry("horseland").get(), &"horseland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get(&self) -> &T {
+        match *self {
+            Entry::Occupied(ref entry) => entry.get(),
+            Entry::Vacant(ref entry) => entry.get(),
+        }
+    }
+}
+
+impl<T, S, A: Allocator + Clone> OccupiedEntry<'_, T, S, A> {
+    /// Gets a reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_set::{Entry, HashSet};
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    /// set.entry("poneyland").or_insert();
+    ///
+    /// match set.entry("poneyland") {
+    ///     Entry::Vacant(_) => panic!(),
+    ///     Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get(&self) -> &T {
+        self.inner.key()
+    }
+
+    /// Takes the value out of the entry, and returns it.
+    /// Keeps the allocated memory for reuse.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_set::Entry;
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    /// // The set is empty
+    /// assert!(set.is_empty() && set.capacity() == 0);
+    ///
+    /// set.entry("poneyland").or_insert();
+    /// let capacity_before_remove = set.capacity();
+    ///
+    /// if let Entry::Occupied(o) = set.entry("poneyland") {
+    ///     assert_eq!(o.remove(), "poneyland");
+    /// }
+    ///
+    /// assert_eq!(set.contains("poneyland"), false);
+    /// // Now the set holds no elements, but its capacity equals the old one
+    /// assert!(set.len() == 0 && set.capacity() == capacity_before_remove);
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn remove(self) -> T {
+        self.inner.remove_entry().0
+    }
+
+    /// Replaces the entry, returning the old value. The new value in the set will be
+    /// the value used to create this entry.
+    ///
+    /// # Panics
+    ///
+    /// Will panic if this OccupiedEntry was created through [`Entry::insert`].
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_set::{Entry, HashSet};
+    /// use std::rc::Rc;
+    ///
+    /// let mut set: HashSet<Rc<String>> = HashSet::new();
+    /// let key_one = Rc::new("Stringthing".to_string());
+    /// let key_two = Rc::new("Stringthing".to_string());
+    ///
+    /// set.insert(key_one.clone());
+    /// assert!(Rc::strong_count(&key_one) == 2 && Rc::strong_count(&key_two) == 1);
+    ///
+    /// match set.entry(key_two.clone()) {
+    ///     Entry::Occupied(entry) => {
+    ///         let old_key: Rc<String> = entry.replace();
+    ///         assert!(Rc::ptr_eq(&key_one, &old_key));
+    ///     }
+    ///     Entry::Vacant(_) => panic!(),
+    /// }
+    ///
+    /// assert!(Rc::strong_count(&key_one) == 1 && Rc::strong_count(&key_two) == 2);
+    /// assert!(set.contains(&"Stringthing".to_owned()));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn replace(self) -> T {
+        self.inner.replace_key()
+    }
+}
+
+impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> {
+    /// Gets a reference to the value that would be used when inserting
+    /// through the `VacantEntry`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    /// assert_eq!(set.entry("poneyland").get(), &"poneyland");
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn get(&self) -> &T {
+        self.inner.key()
+    }
+
+    /// Take ownership of the value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::hash_set::{Entry, HashSet};
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    ///
+    /// match set.entry("poneyland") {
+    ///     Entry::Occupied(_) => panic!(),
+    ///     Entry::Vacant(v) => assert_eq!(v.into_value(), "poneyland"),
+    /// }
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn into_value(self) -> T {
+        self.inner.into_key()
+    }
+
+    /// Sets the value of the entry with the VacantEntry's value.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use hashbrown::HashSet;
+    /// use hashbrown::hash_set::Entry;
+    ///
+    /// let mut set: HashSet<&str> = HashSet::new();
+    ///
+    /// if let Entry::Vacant(o) = set.entry("poneyland") {
+    ///     o.insert();
+    /// }
+    /// assert!(set.contains("poneyland"));
+    /// ```
+    #[cfg_attr(feature = "inline-more", inline)]
+    pub fn insert(self)
+    where
+        T: Hash,
+        S: BuildHasher,
+    {
+        self.inner.insert(());
+    }
+
+    #[cfg_attr(feature = "inline-more", inline)]
+    fn insert_entry(self) -> OccupiedEntry<'a, T, S, A>
+    where
+        T: Hash,
+        S: BuildHasher,
+    {
+        OccupiedEntry {
+            inner: self.inner.insert_entry(()),
+        }
+    }
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+    fn set<'new>(v: HashSet<&'static str>) -> HashSet<&'new str> {
+        v
+    }
+    fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
+        v
+    }
+    fn into_iter<'new, A: Allocator + Clone>(
+        v: IntoIter<&'static str, A>,
+    ) -> IntoIter<&'new str, A> {
+        v
+    }
+    fn difference<'a, 'new, A: Allocator + Clone>(
+        v: Difference<'a, &'static str, DefaultHashBuilder, A>,
+    ) -> Difference<'a, &'new str, DefaultHashBuilder, A> {
+        v
+    }
+    fn symmetric_difference<'a, 'new, A: Allocator + Clone>(
+        v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>,
+    ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> {
+        v
+    }
+    fn intersection<'a, 'new, A: Allocator + Clone>(
+        v: Intersection<'a, &'static str, DefaultHashBuilder, A>,
+    ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> {
+        v
+    }
+    fn union<'a, 'new, A: Allocator + Clone>(
+        v: Union<'a, &'static str, DefaultHashBuilder, A>,
+    ) -> Union<'a, &'new str, DefaultHashBuilder, A> {
+        v
+    }
+    fn drain<'new, A: Allocator + Clone>(
+        d: Drain<'static, &'static str, A>,
+    ) -> Drain<'new, &'new str, A> {
+        d
+    }
+}
+
+#[cfg(test)]
+mod test_set {
+    use super::super::map::DefaultHashBuilder;
+    use super::HashSet;
+    use std::vec::Vec;
+
+    #[test]
+    fn test_zero_capacities() {
+        type HS = HashSet<i32>;
+
+        let s = HS::new();
+        assert_eq!(s.capacity(), 0);
+
+        let s = HS::default();
+        assert_eq!(s.capacity(), 0);
+
+        let s = HS::with_hasher(DefaultHashBuilder::default());
+        assert_eq!(s.capacity(), 0);
+
+        let s = HS::with_capacity(0);
+        assert_eq!(s.capacity(), 0);
+
+        let s = HS::with_capacity_and_hasher(0, DefaultHashBuilder::default());
+        assert_eq!(s.capacity(), 0);
+
+        let mut s = HS::new();
+        s.insert(1);
+        s.insert(2);
+        s.remove(&1);
+        s.remove(&2);
+        s.shrink_to_fit();
+        assert_eq!(s.capacity(), 0);
+
+        let mut s = HS::new();
+        s.reserve(0);
+        assert_eq!(s.capacity(), 0);
+    }
+
+    #[test]
+    fn test_disjoint() {
+        let mut xs = HashSet::new();
+        let mut ys = HashSet::new();
+        assert!(xs.is_disjoint(&ys));
+        assert!(ys.is_disjoint(&xs));
+        assert!(xs.insert(5));
+        assert!(ys.insert(11));
+        assert!(xs.is_disjoint(&ys));
+        assert!(ys.is_disjoint(&xs));
+        assert!(xs.insert(7));
+        assert!(xs.insert(19));
+        assert!(xs.insert(4));
+        assert!(ys.insert(2));
+        assert!(ys.insert(-11));
+        assert!(xs.is_disjoint(&ys));
+        assert!(ys.is_disjoint(&xs));
+        assert!(ys.insert(7));
+        assert!(!xs.is_disjoint(&ys));
+        assert!(!ys.is_disjoint(&xs));
+    }
+
+    #[test]
+    fn test_subset_and_superset() {
+        let mut a = HashSet::new();
+        assert!(a.insert(0));
+        assert!(a.insert(5));
+        assert!(a.insert(11));
+        assert!(a.insert(7));
+
+        let mut b = HashSet::new();
+        assert!(b.insert(0));
+        assert!(b.insert(7));
+        assert!(b.insert(19));
+        assert!(b.insert(250));
+        assert!(b.insert(11));
+        assert!(b.insert(200));
+
+        assert!(!a.is_subset(&b));
+        assert!(!a.is_superset(&b));
+        assert!(!b.is_subset(&a));
+        assert!(!b.is_superset(&a));
+
+        assert!(b.insert(5));
+
+        assert!(a.is_subset(&b));
+        assert!(!a.is_superset(&b));
+        assert!(!b.is_subset(&a));
+        assert!(b.is_superset(&a));
+    }
+
+    #[test]
+    fn test_iterate() {
+        let mut a = HashSet::new();
+        for i in 0..32 {
+            assert!(a.insert(i));
+        }
+        let mut observed: u32 = 0;
+        for k in &a {
+            observed |= 1 << *k;
+        }
+        assert_eq!(observed, 0xFFFF_FFFF);
+    }
+
+    #[test]
+    fn test_intersection() {
+        let mut a = HashSet::new();
+        let mut b = HashSet::new();
+
+        assert!(a.insert(11));
+        assert!(a.insert(1));
+        assert!(a.insert(3));
+        assert!(a.insert(77));
+        assert!(a.insert(103));
+        assert!(a.insert(5));
+        assert!(a.insert(-5));
+
+        assert!(b.insert(2));
+        assert!(b.insert(11));
+        assert!(b.insert(77));
+        assert!(b.insert(-9));
+        assert!(b.insert(-42));
+        assert!(b.insert(5));
+        assert!(b.insert(3));
+
+        let mut i = 0;
+        let expected = [3, 5, 11, 77];
+        for x in a.intersection(&b) {
+            assert!(expected.contains(x));
+            i += 1;
+        }
+        assert_eq!(i, expected.len());
+    }
+
+    #[test]
+    fn test_difference() {
+        let mut a = HashSet::new();
+        let mut b = HashSet::new();
+
+        assert!(a.insert(1));
+        assert!(a.insert(3));
+        assert!(a.insert(5));
+        assert!(a.insert(9));
+        assert!(a.insert(11));
+
+        assert!(b.insert(3));
+        assert!(b.insert(9));
+
+        let mut i = 0;
+        let expected = [1, 5, 11];
+        for x in a.difference(&b) {
+            assert!(expected.contains(x));
+            i += 1;
+        }
+        assert_eq!(i, expected.len());
+    }
+
+    #[test]
+    fn test_symmetric_difference() {
+        let mut a = HashSet::new();
+        let mut b = HashSet::new();
+
+        assert!(a.insert(1));
+        assert!(a.insert(3));
+        assert!(a.insert(5));
+        assert!(a.insert(9));
+        assert!(a.insert(11));
+
+        assert!(b.insert(-2));
+        assert!(b.insert(3));
+        assert!(b.insert(9));
+        assert!(b.insert(14));
+        assert!(b.insert(22));
+
+        let mut i = 0;
+        let expected = [-2, 1, 5, 11, 14, 22];
+        for x in a.symmetric_difference(&b) {
+            assert!(expected.contains(x));
+            i += 1;
+        }
+        assert_eq!(i, expected.len());
+    }
+
+    #[test]
+    fn test_union() {
+        let mut a = HashSet::new();
+        let mut b = HashSet::new();
+
+        assert!(a.insert(1));
+        assert!(a.insert(3));
+        assert!(a.insert(5));
+        assert!(a.insert(9));
+        assert!(a.insert(11));
+        assert!(a.insert(16));
+        assert!(a.insert(19));
+        assert!(a.insert(24));
+
+        assert!(b.insert(-2));
+        assert!(b.insert(1));
+        assert!(b.insert(5));
+        assert!(b.insert(9));
+        assert!(b.insert(13));
+        assert!(b.insert(19));
+
+        let mut i = 0;
+        let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
+        for x in a.union(&b) {
+            assert!(expected.contains(x));
+            i += 1;
+        }
+        assert_eq!(i, expected.len());
+    }
+
+    #[test]
+    fn test_from_map() {
+        let mut a = crate::HashMap::new();
+        a.insert(1, ());
+        a.insert(2, ());
+        a.insert(3, ());
+        a.insert(4, ());
+
+        let a: HashSet<_> = a.into();
+
+        assert_eq!(a.len(), 4);
+        assert!(a.contains(&1));
+        assert!(a.contains(&2));
+        assert!(a.contains(&3));
+        assert!(a.contains(&4));
+    }
+
+    #[test]
+    fn test_from_iter() {
+        let xs = [1, 2, 2, 3, 4, 5, 6, 7, 8, 9];
+
+        let set: HashSet<_> = xs.iter().copied().collect();
+
+        for x in &xs {
+            assert!(set.contains(x));
+        }
+
+        assert_eq!(set.iter().len(), xs.len() - 1);
+    }
+
+    #[test]
+    fn test_move_iter() {
+        let hs = {
+            let mut hs = HashSet::new();
+
+            hs.insert('a');
+            hs.insert('b');
+
+            hs
+        };
+
+        let v = hs.into_iter().collect::<Vec<char>>();
+        assert!(v == ['a', 'b'] || v == ['b', 'a']);
+    }
+
+    #[test]
+    fn test_eq() {
+        // These constants once happened to expose a bug in insert().
+        // I'm keeping them around to prevent a regression.
+        let mut s1 = HashSet::new();
+
+        s1.insert(1);
+        s1.insert(2);
+        s1.insert(3);
+
+        let mut s2 = HashSet::new();
+
+        s2.insert(1);
+        s2.insert(2);
+
+        assert!(s1 != s2);
+
+        s2.insert(3);
+
+        assert_eq!(s1, s2);
+    }
+
+    #[test]
+    fn test_show() {
+        let mut set = HashSet::new();
+        let empty = HashSet::<i32>::new();
+
+        set.insert(1);
+        set.insert(2);
+
+        let set_str = format!("{:?}", set);
+
+        assert!(set_str == "{1, 2}" || set_str == "{2, 1}");
+        assert_eq!(format!("{:?}", empty), "{}");
+    }
+
+    #[test]
+    fn test_trivial_drain() {
+        let mut s = HashSet::<i32>::new();
+        for _ in s.drain() {}
+        assert!(s.is_empty());
+        drop(s);
+
+        let mut s = HashSet::<i32>::new();
+        drop(s.drain());
+        assert!(s.is_empty());
+    }
+
+    #[test]
+    fn test_drain() {
+        let mut s: HashSet<_> = (1..100).collect();
+
+        // try this a bunch of times to make sure we don't screw up internal state.
+        for _ in 0..20 {
+            assert_eq!(s.len(), 99);
+
+            {
+                let mut last_i = 0;
+                let mut d = s.drain();
+                for (i, x) in d.by_ref().take(50).enumerate() {
+                    last_i = i;
+                    assert!(x != 0);
+                }
+                assert_eq!(last_i, 49);
+            }
+
+            for _ in &s {
+                panic!("s should be empty!");
+            }
+
+            // reset to try again.
+            s.extend(1..100);
+        }
+    }
+
+    #[test]
+    fn test_replace() {
+        use core::hash;
+
+        #[derive(Debug)]
+        struct Foo(&'static str, i32);
+
+        impl PartialEq for Foo {
+            fn eq(&self, other: &Self) -> bool {
+                self.0 == other.0
+            }
+        }
+
+        impl Eq for Foo {}
+
+        impl hash::Hash for Foo {
+            fn hash<H: hash::Hasher>(&self, h: &mut H) {
+                self.0.hash(h);
+            }
+        }
+
+        let mut s = HashSet::new();
+        assert_eq!(s.replace(Foo("a", 1)), None);
+        assert_eq!(s.len(), 1);
+        assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
+        assert_eq!(s.len(), 1);
+
+        let mut it = s.iter();
+        assert_eq!(it.next(), Some(&Foo("a", 2)));
+        assert_eq!(it.next(), None);
+    }
+
+    #[test]
+    fn test_extend_ref() {
+        let mut a = HashSet::new();
+        a.insert(1);
+
+        a.extend(&[2, 3, 4]);
+
+        assert_eq!(a.len(), 4);
+        assert!(a.contains(&1));
+        assert!(a.contains(&2));
+        assert!(a.contains(&3));
+        assert!(a.contains(&4));
+
+        let mut b = HashSet::new();
+        b.insert(5);
+        b.insert(6);
+
+        a.extend(&b);
+
+        assert_eq!(a.len(), 6);
+        assert!(a.contains(&1));
+        assert!(a.contains(&2));
+        assert!(a.contains(&3));
+        assert!(a.contains(&4));
+        assert!(a.contains(&5));
+        assert!(a.contains(&6));
+    }
+
+    #[test]
+    fn test_retain() {
+        let xs = [1, 2, 3, 4, 5, 6];
+        let mut set: HashSet<i32> = xs.iter().copied().collect();
+        set.retain(|&k| k % 2 == 0);
+        assert_eq!(set.len(), 3);
+        assert!(set.contains(&2));
+        assert!(set.contains(&4));
+        assert!(set.contains(&6));
+    }
+
+    #[test]
+    fn test_drain_filter() {
+        {
+            let mut set: HashSet<i32> = (0..8).collect();
+            let drained = set.drain_filter(|&k| k % 2 == 0);
+            let mut out = drained.collect::<Vec<_>>();
+            out.sort_unstable();
+            assert_eq!(vec![0, 2, 4, 6], out);
+            assert_eq!(set.len(), 4);
+        }
+        {
+            let mut set: HashSet<i32> = (0..8).collect();
+            drop(set.drain_filter(|&k| k % 2 == 0));
+            assert_eq!(set.len(), 4, "Removes non-matching items on drop");
+        }
+    }
+
+    #[test]
+    fn test_const_with_hasher() {
+        use core::hash::BuildHasher;
+        use std::collections::hash_map::DefaultHasher;
+
+        #[derive(Clone)]
+        struct MyHasher;
+        impl BuildHasher for MyHasher {
+            type Hasher = DefaultHasher;
+
+            fn build_hasher(&self) -> DefaultHasher {
+                DefaultHasher::new()
+            }
+        }
+
+        const EMPTY_SET: HashSet<u32, MyHasher> = HashSet::with_hasher(MyHasher);
+
+        let mut set = EMPTY_SET;
+        set.insert(19);
+        assert!(set.contains(&19));
+    }
+
+    #[test]
+    fn rehash_in_place() {
+        let mut set = HashSet::new();
+
+        for i in 0..224 {
+            set.insert(i);
+        }
+
+        assert_eq!(
+            set.capacity(),
+            224,
+            "The set must be at or close to capacity to trigger rehashing"
+        );
+
+        for i in 100..1400 {
+            set.remove(&(i - 100));
+            set.insert(i);
+        }
+    }
+}
diff --git a/vendor/hashbrown-0.12.3/tests/hasher.rs b/vendor/hashbrown-0.12.3/tests/hasher.rs
new file mode 100644
index 0000000..e455e3d
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/tests/hasher.rs
@@ -0,0 +1,65 @@
+//! Sanity check that alternate hashers work correctly.
+
+#![cfg(not(miri))] // FIXME: takes too long
+
+use hashbrown::HashSet;
+use std::hash::{BuildHasher, BuildHasherDefault, Hasher};
+
+fn check<S: BuildHasher + Default>() {
+    let range = 0..1_000;
+
+    let mut set = HashSet::<i32, S>::default();
+    set.extend(range.clone());
+
+    assert!(!set.contains(&i32::min_value()));
+    assert!(!set.contains(&(range.start - 1)));
+    for i in range.clone() {
+        assert!(set.contains(&i));
+    }
+    assert!(!set.contains(&range.end));
+    assert!(!set.contains(&i32::max_value()));
+}
+
+/// Use hashbrown's default hasher.
+#[test]
+fn default() {
+    check::<hashbrown::hash_map::DefaultHashBuilder>();
+}
+
+/// Use std's default hasher.
+#[test]
+fn random_state() {
+    check::<std::collections::hash_map::RandomState>();
+}
+
+/// Use a constant 0 hash.
+#[test]
+fn zero() {
+    #[derive(Default)]
+    struct ZeroHasher;
+
+    impl Hasher for ZeroHasher {
+        fn finish(&self) -> u64 {
+            0
+        }
+        fn write(&mut self, _: &[u8]) {}
+    }
+
+    check::<BuildHasherDefault<ZeroHasher>>();
+}
+
+/// Use a constant maximum hash.
+#[test]
+fn max() {
+    #[derive(Default)]
+    struct MaxHasher;
+
+    impl Hasher for MaxHasher {
+        fn finish(&self) -> u64 {
+            u64::max_value()
+        }
+        fn write(&mut self, _: &[u8]) {}
+    }
+
+    check::<BuildHasherDefault<MaxHasher>>();
+}
diff --git a/vendor/hashbrown-0.12.3/tests/rayon.rs b/vendor/hashbrown-0.12.3/tests/rayon.rs
new file mode 100644
index 0000000..8c603c5
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/tests/rayon.rs
@@ -0,0 +1,533 @@
+#![cfg(feature = "rayon")]
+
+#[macro_use]
+extern crate lazy_static;
+
+use hashbrown::{HashMap, HashSet};
+use rayon::iter::{
+    IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelExtend,
+    ParallelIterator,
+};
+
+macro_rules! assert_eq3 {
+    ($e1:expr, $e2:expr, $e3:expr) => {{
+        assert_eq!($e1, $e2);
+        assert_eq!($e1, $e3);
+        assert_eq!($e2, $e3);
+    }};
+}
+
+lazy_static! {
+    static ref MAP_EMPTY: HashMap<char, u32> = HashMap::new();
+    static ref MAP: HashMap<char, u32> = {
+        let mut m = HashMap::new();
+        m.insert('b', 20);
+        m.insert('a', 10);
+        m.insert('c', 30);
+        m.insert('e', 50);
+        m.insert('f', 60);
+        m.insert('d', 40);
+        m
+    };
+}
+
+#[test]
+fn map_seq_par_equivalence_iter_empty() {
+    let vec_seq = MAP_EMPTY.iter().collect::<Vec<_>>();
+    let vec_par = MAP_EMPTY.par_iter().collect::<Vec<_>>();
+
+    assert_eq3!(vec_seq, vec_par, []);
+}
+
+#[test]
+fn map_seq_par_equivalence_iter() {
+    let mut vec_seq = MAP.iter().collect::<Vec<_>>();
+    let mut vec_par = MAP.par_iter().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [
+        (&'a', &10),
+        (&'b', &20),
+        (&'c', &30),
+        (&'d', &40),
+        (&'e', &50),
+        (&'f', &60),
+    ];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+#[test]
+fn map_seq_par_equivalence_keys_empty() {
+    let vec_seq = MAP_EMPTY.keys().collect::<Vec<_>>();
+    let vec_par = MAP_EMPTY.par_keys().collect::<Vec<_>>();
+
+    let expected: [&char; 0] = [];
+
+    assert_eq3!(vec_seq, vec_par, expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_keys() {
+    let mut vec_seq = MAP.keys().collect::<Vec<_>>();
+    let mut vec_par = MAP.par_keys().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f'];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+#[test]
+fn map_seq_par_equivalence_values_empty() {
+    let vec_seq = MAP_EMPTY.values().collect::<Vec<_>>();
+    let vec_par = MAP_EMPTY.par_values().collect::<Vec<_>>();
+
+    let expected: [&u32; 0] = [];
+
+    assert_eq3!(vec_seq, vec_par, expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_values() {
+    let mut vec_seq = MAP.values().collect::<Vec<_>>();
+    let mut vec_par = MAP.par_values().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [&10, &20, &30, &40, &50, &60];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+#[test]
+fn map_seq_par_equivalence_iter_mut_empty() {
+    let mut map1 = MAP_EMPTY.clone();
+    let mut map2 = MAP_EMPTY.clone();
+
+    let vec_seq = map1.iter_mut().collect::<Vec<_>>();
+    let vec_par = map2.par_iter_mut().collect::<Vec<_>>();
+
+    assert_eq3!(vec_seq, vec_par, []);
+}
+
+#[test]
+fn map_seq_par_equivalence_iter_mut() {
+    let mut map1 = MAP.clone();
+    let mut map2 = MAP.clone();
+
+    let mut vec_seq = map1.iter_mut().collect::<Vec<_>>();
+    let mut vec_par = map2.par_iter_mut().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [
+        (&'a', &mut 10),
+        (&'b', &mut 20),
+        (&'c', &mut 30),
+        (&'d', &mut 40),
+        (&'e', &mut 50),
+        (&'f', &mut 60),
+    ];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+#[test]
+fn map_seq_par_equivalence_values_mut_empty() {
+    let mut map1 = MAP_EMPTY.clone();
+    let mut map2 = MAP_EMPTY.clone();
+
+    let vec_seq = map1.values_mut().collect::<Vec<_>>();
+    let vec_par = map2.par_values_mut().collect::<Vec<_>>();
+
+    let expected: [&u32; 0] = [];
+
+    assert_eq3!(vec_seq, vec_par, expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_values_mut() {
+    let mut map1 = MAP.clone();
+    let mut map2 = MAP.clone();
+
+    let mut vec_seq = map1.values_mut().collect::<Vec<_>>();
+    let mut vec_par = map2.par_values_mut().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [&mut 10, &mut 20, &mut 30, &mut 40, &mut 50, &mut 60];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+#[test]
+fn map_seq_par_equivalence_into_iter_empty() {
+    let vec_seq = MAP_EMPTY.clone().into_iter().collect::<Vec<_>>();
+    let vec_par = MAP_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
+
+    assert_eq3!(vec_seq, vec_par, []);
+}
+
+#[test]
+fn map_seq_par_equivalence_into_iter() {
+    let mut vec_seq = MAP.clone().into_iter().collect::<Vec<_>>();
+    let mut vec_par = MAP.clone().into_par_iter().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [
+        ('a', 10),
+        ('b', 20),
+        ('c', 30),
+        ('d', 40),
+        ('e', 50),
+        ('f', 60),
+    ];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+lazy_static! {
+    static ref MAP_VEC_EMPTY: Vec<(char, u32)> = vec![];
+    static ref MAP_VEC: Vec<(char, u32)> = vec![
+        ('b', 20),
+        ('a', 10),
+        ('c', 30),
+        ('e', 50),
+        ('f', 60),
+        ('d', 40),
+    ];
+}
+
+#[test]
+fn map_seq_par_equivalence_collect_empty() {
+    let map_expected = MAP_EMPTY.clone();
+    let map_seq = MAP_VEC_EMPTY.clone().into_iter().collect::<HashMap<_, _>>();
+    let map_par = MAP_VEC_EMPTY
+        .clone()
+        .into_par_iter()
+        .collect::<HashMap<_, _>>();
+
+    assert_eq!(map_seq, map_par);
+    assert_eq!(map_seq, map_expected);
+    assert_eq!(map_par, map_expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_collect() {
+    let map_expected = MAP.clone();
+    let map_seq = MAP_VEC.clone().into_iter().collect::<HashMap<_, _>>();
+    let map_par = MAP_VEC.clone().into_par_iter().collect::<HashMap<_, _>>();
+
+    assert_eq!(map_seq, map_par);
+    assert_eq!(map_seq, map_expected);
+    assert_eq!(map_par, map_expected);
+}
+
+lazy_static! {
+    static ref MAP_EXISTING_EMPTY: HashMap<char, u32> = HashMap::new();
+    static ref MAP_EXISTING: HashMap<char, u32> = {
+        let mut m = HashMap::new();
+        m.insert('b', 20);
+        m.insert('a', 10);
+        m
+    };
+    static ref MAP_EXTENSION_EMPTY: Vec<(char, u32)> = vec![];
+    static ref MAP_EXTENSION: Vec<(char, u32)> = vec![('c', 30), ('e', 50), ('f', 60), ('d', 40),];
+}
+
+#[test]
+fn map_seq_par_equivalence_existing_empty_extend_empty() {
+    let expected = HashMap::new();
+    let mut map_seq = MAP_EXISTING_EMPTY.clone();
+    let mut map_par = MAP_EXISTING_EMPTY.clone();
+
+    map_seq.extend(MAP_EXTENSION_EMPTY.iter().copied());
+    map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().copied());
+
+    assert_eq3!(map_seq, map_par, expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_existing_empty_extend() {
+    let expected = MAP_EXTENSION.iter().copied().collect::<HashMap<_, _>>();
+    let mut map_seq = MAP_EXISTING_EMPTY.clone();
+    let mut map_par = MAP_EXISTING_EMPTY.clone();
+
+    map_seq.extend(MAP_EXTENSION.iter().copied());
+    map_par.par_extend(MAP_EXTENSION.par_iter().copied());
+
+    assert_eq3!(map_seq, map_par, expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_existing_extend_empty() {
+    let expected = MAP_EXISTING.clone();
+    let mut map_seq = MAP_EXISTING.clone();
+    let mut map_par = MAP_EXISTING.clone();
+
+    map_seq.extend(MAP_EXTENSION_EMPTY.iter().copied());
+    map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().copied());
+
+    assert_eq3!(map_seq, map_par, expected);
+}
+
+#[test]
+fn map_seq_par_equivalence_existing_extend() {
+    let expected = MAP.clone();
+    let mut map_seq = MAP_EXISTING.clone();
+    let mut map_par = MAP_EXISTING.clone();
+
+    map_seq.extend(MAP_EXTENSION.iter().copied());
+    map_par.par_extend(MAP_EXTENSION.par_iter().copied());
+
+    assert_eq3!(map_seq, map_par, expected);
+}
+
+lazy_static! {
+    static ref SET_EMPTY: HashSet<char> = HashSet::new();
+    static ref SET: HashSet<char> = {
+        let mut s = HashSet::new();
+        s.insert('b');
+        s.insert('a');
+        s.insert('c');
+        s.insert('e');
+        s.insert('f');
+        s.insert('d');
+        s
+    };
+}
+
+#[test]
+fn set_seq_par_equivalence_iter_empty() {
+    let vec_seq = SET_EMPTY.iter().collect::<Vec<_>>();
+    let vec_par = SET_EMPTY.par_iter().collect::<Vec<_>>();
+
+    let expected: [&char; 0] = [];
+
+    assert_eq3!(vec_seq, vec_par, expected);
+}
+
+#[test]
+fn set_seq_par_equivalence_iter() {
+    let mut vec_seq = SET.iter().collect::<Vec<_>>();
+    let mut vec_par = SET.par_iter().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f'];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+#[test]
+fn set_seq_par_equivalence_into_iter_empty() {
+    let vec_seq = SET_EMPTY.clone().into_iter().collect::<Vec<_>>();
+    let vec_par = SET_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
+
+    assert_eq3!(vec_seq, vec_par, []);
+}
+
+#[test]
+fn set_seq_par_equivalence_into_iter() {
+    let mut vec_seq = SET.clone().into_iter().collect::<Vec<_>>();
+    let mut vec_par = SET.clone().into_par_iter().collect::<Vec<_>>();
+
+    assert_eq!(vec_seq, vec_par);
+
+    // Do not depend on the exact order of values
+    let expected_sorted = ['a', 'b', 'c', 'd', 'e', 'f'];
+
+    vec_seq.sort_unstable();
+    vec_par.sort_unstable();
+
+    assert_eq3!(vec_seq, vec_par, expected_sorted);
+}
+
+lazy_static! {
+    static ref SET_VEC_EMPTY: Vec<char> = vec![];
+    static ref SET_VEC: Vec<char> = vec!['b', 'a', 'c', 'e', 'f', 'd',];
+}
+
+#[test]
+fn set_seq_par_equivalence_collect_empty() {
+    let set_expected = SET_EMPTY.clone();
+    let set_seq = SET_VEC_EMPTY.clone().into_iter().collect::<HashSet<_>>();
+    let set_par = SET_VEC_EMPTY
+        .clone()
+        .into_par_iter()
+        .collect::<HashSet<_>>();
+
+    assert_eq!(set_seq, set_par);
+    assert_eq!(set_seq, set_expected);
+    assert_eq!(set_par, set_expected);
+}
+
+#[test]
+fn set_seq_par_equivalence_collect() {
+    let set_expected = SET.clone();
+    let set_seq = SET_VEC.clone().into_iter().collect::<HashSet<_>>();
+    let set_par = SET_VEC.clone().into_par_iter().collect::<HashSet<_>>();
+
+    assert_eq!(set_seq, set_par);
+    assert_eq!(set_seq, set_expected);
+    assert_eq!(set_par, set_expected);
+}
+
+lazy_static! {
+    static ref SET_EXISTING_EMPTY: HashSet<char> = HashSet::new();
+    static ref SET_EXISTING: HashSet<char> = {
+        let mut s = HashSet::new();
+        s.insert('b');
+        s.insert('a');
+        s
+    };
+    static ref SET_EXTENSION_EMPTY: Vec<char> = vec![];
+    static ref SET_EXTENSION: Vec<char> = vec!['c', 'e', 'f', 'd',];
+}
+
+#[test]
+fn set_seq_par_equivalence_existing_empty_extend_empty() {
+    let expected = HashSet::new();
+    let mut set_seq = SET_EXISTING_EMPTY.clone();
+    let mut set_par = SET_EXISTING_EMPTY.clone();
+
+    set_seq.extend(SET_EXTENSION_EMPTY.iter().copied());
+    set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().copied());
+
+    assert_eq3!(set_seq, set_par, expected);
+}
+
+#[test]
+fn set_seq_par_equivalence_existing_empty_extend() {
+    let expected = SET_EXTENSION.iter().copied().collect::<HashSet<_>>();
+    let mut set_seq = SET_EXISTING_EMPTY.clone();
+    let mut set_par = SET_EXISTING_EMPTY.clone();
+
+    set_seq.extend(SET_EXTENSION.iter().copied());
+    set_par.par_extend(SET_EXTENSION.par_iter().copied());
+
+    assert_eq3!(set_seq, set_par, expected);
+}
+
+#[test]
+fn set_seq_par_equivalence_existing_extend_empty() {
+    let expected = SET_EXISTING.clone();
+    let mut set_seq = SET_EXISTING.clone();
+    let mut set_par = SET_EXISTING.clone();
+
+    set_seq.extend(SET_EXTENSION_EMPTY.iter().copied());
+    set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().copied());
+
+    assert_eq3!(set_seq, set_par, expected);
+}
+
+#[test]
+fn set_seq_par_equivalence_existing_extend() {
+    let expected = SET.clone();
+    let mut set_seq = SET_EXISTING.clone();
+    let mut set_par = SET_EXISTING.clone();
+
+    set_seq.extend(SET_EXTENSION.iter().copied());
+    set_par.par_extend(SET_EXTENSION.par_iter().copied());
+
+    assert_eq3!(set_seq, set_par, expected);
+}
+
+lazy_static! {
+    static ref SET_A: HashSet<char> = ['a', 'b', 'c', 'd'].iter().copied().collect();
+    static ref SET_B: HashSet<char> = ['a', 'b', 'e', 'f'].iter().copied().collect();
+    static ref SET_DIFF_AB: HashSet<char> = ['c', 'd'].iter().copied().collect();
+    static ref SET_DIFF_BA: HashSet<char> = ['e', 'f'].iter().copied().collect();
+    static ref SET_SYMM_DIFF_AB: HashSet<char> = ['c', 'd', 'e', 'f'].iter().copied().collect();
+    static ref SET_INTERSECTION_AB: HashSet<char> = ['a', 'b'].iter().copied().collect();
+    static ref SET_UNION_AB: HashSet<char> =
+        ['a', 'b', 'c', 'd', 'e', 'f'].iter().copied().collect();
+}
+
+#[test]
+fn set_seq_par_equivalence_difference() {
+    let diff_ab_seq = SET_A.difference(&*SET_B).copied().collect::<HashSet<_>>();
+    let diff_ab_par = SET_A
+        .par_difference(&*SET_B)
+        .copied()
+        .collect::<HashSet<_>>();
+
+    assert_eq3!(diff_ab_seq, diff_ab_par, *SET_DIFF_AB);
+
+    let diff_ba_seq = SET_B.difference(&*SET_A).copied().collect::<HashSet<_>>();
+    let diff_ba_par = SET_B
+        .par_difference(&*SET_A)
+        .copied()
+        .collect::<HashSet<_>>();
+
+    assert_eq3!(diff_ba_seq, diff_ba_par, *SET_DIFF_BA);
+}
+
+#[test]
+fn set_seq_par_equivalence_symmetric_difference() {
+    let symm_diff_ab_seq = SET_A
+        .symmetric_difference(&*SET_B)
+        .copied()
+        .collect::<HashSet<_>>();
+    let symm_diff_ab_par = SET_A
+        .par_symmetric_difference(&*SET_B)
+        .copied()
+        .collect::<HashSet<_>>();
+
+    assert_eq3!(symm_diff_ab_seq, symm_diff_ab_par, *SET_SYMM_DIFF_AB);
+}
+
+#[test]
+fn set_seq_par_equivalence_intersection() {
+    let intersection_ab_seq = SET_A.intersection(&*SET_B).copied().collect::<HashSet<_>>();
+    let intersection_ab_par = SET_A
+        .par_intersection(&*SET_B)
+        .copied()
+        .collect::<HashSet<_>>();
+
+    assert_eq3!(
+        intersection_ab_seq,
+        intersection_ab_par,
+        *SET_INTERSECTION_AB
+    );
+}
+
+#[test]
+fn set_seq_par_equivalence_union() {
+    let union_ab_seq = SET_A.union(&*SET_B).copied().collect::<HashSet<_>>();
+    let union_ab_par = SET_A.par_union(&*SET_B).copied().collect::<HashSet<_>>();
+
+    assert_eq3!(union_ab_seq, union_ab_par, *SET_UNION_AB);
+}
diff --git a/vendor/hashbrown-0.12.3/tests/serde.rs b/vendor/hashbrown-0.12.3/tests/serde.rs
new file mode 100644
index 0000000..a642348
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/tests/serde.rs
@@ -0,0 +1,65 @@
+#![cfg(feature = "serde")]
+
+use core::hash::BuildHasherDefault;
+use fnv::FnvHasher;
+use hashbrown::{HashMap, HashSet};
+use serde_test::{assert_tokens, Token};
+
+// We use FnvHash for this test because we rely on the ordering
+type FnvHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FnvHasher>>;
+type FnvHashSet<T> = HashSet<T, BuildHasherDefault<FnvHasher>>;
+
+#[test]
+fn map_serde_tokens_empty() {
+    let map = FnvHashMap::<char, u32>::default();
+
+    assert_tokens(&map, &[Token::Map { len: Some(0) }, Token::MapEnd]);
+}
+
+#[test]
+fn map_serde_tokens() {
+    let mut map = FnvHashMap::default();
+    map.insert('b', 20);
+    map.insert('a', 10);
+    map.insert('c', 30);
+
+    assert_tokens(
+        &map,
+        &[
+            Token::Map { len: Some(3) },
+            Token::Char('a'),
+            Token::I32(10),
+            Token::Char('c'),
+            Token::I32(30),
+            Token::Char('b'),
+            Token::I32(20),
+            Token::MapEnd,
+        ],
+    );
+}
+
+#[test]
+fn set_serde_tokens_empty() {
+    let set = FnvHashSet::<u32>::default();
+
+    assert_tokens(&set, &[Token::Seq { len: Some(0) }, Token::SeqEnd]);
+}
+
+#[test]
+fn set_serde_tokens() {
+    let mut set = FnvHashSet::default();
+    set.insert(20);
+    set.insert(10);
+    set.insert(30);
+
+    assert_tokens(
+        &set,
+        &[
+            Token::Seq { len: Some(3) },
+            Token::I32(30),
+            Token::I32(20),
+            Token::I32(10),
+            Token::SeqEnd,
+        ],
+    );
+}
diff --git a/vendor/hashbrown-0.12.3/tests/set.rs b/vendor/hashbrown-0.12.3/tests/set.rs
new file mode 100644
index 0000000..5ae1ec9
--- /dev/null
+++ b/vendor/hashbrown-0.12.3/tests/set.rs
@@ -0,0 +1,34 @@
+#![cfg(not(miri))] // FIXME: takes too long
+
+use hashbrown::HashSet;
+use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng};
+use std::iter;
+
+#[test]
+fn test_hashset_insert_remove() {
+    let mut m: HashSet<Vec<char>> = HashSet::new();
+    let seed = u64::from_le_bytes(*b"testseed");
+
+    let rng = &mut SmallRng::seed_from_u64(seed);
+    let tx: Vec<Vec<char>> = iter::repeat_with(|| {
+        rng.sample_iter(&Alphanumeric)
+            .take(32)
+            .map(char::from)
+            .collect()
+    })
+    .take(4096)
+    .collect();
+
+    // more readable with explicit `true` / `false`
+    #[allow(clippy::bool_assert_comparison)]
+    for _ in 0..32 {
+        for x in &tx {
+            assert_eq!(m.contains(x), false);
+            assert_eq!(m.insert(x.clone()), true);
+        }
+        for (i, x) in tx.iter().enumerate() {
+            println!("removing {} {:?}", i, x);
+            assert_eq!(m.remove(x), true);
+        }
+    }
+}
-- 
Gitee