Bug 1950364 - Update and vendor minidump-writer r=glandium,supply-chain-reviewers,gsvelto,nika
Differential Revision: https://phabricator.services.mozilla.com/D239530
70  Cargo.lock  generated
@@ -581,9 +581,9 @@ dependencies = [

[[package]]
name = "breakpad-symbols"
-version = "0.22.1"
+version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6aeaa2a7f839cbb61c2f59ad6e51cc3fd2c24aa2103cb24e6be143bcc114aa24"
+checksum = "05cc04995b4f6f26dc9cc5989e93e42c373def047b4b057aaf8f48400b971d1e"
dependencies = [
 "async-trait",
 "cachemap2",
@@ -612,7 +612,7 @@ dependencies = [
name = "buildid_reader"
version = "0.1.0"
dependencies = [
- "goblin 0.8.2",
+ "goblin 0.8.999",
 "libc",
 "log",
 "scroll",
@@ -1792,6 +1792,15 @@ dependencies = [
 "version_check",
]

+[[package]]
+name = "error-graph"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b920e777967421aa5f9bf34f842c0ab6ba19b3bdb4a082946093860f5858879"
+dependencies = [
+ "serde",
+]
+
[[package]]
name = "error-support"
version = "0.1.0"
@@ -1846,6 +1855,12 @@ dependencies = [
 "syn",
]

+[[package]]
+name = "failspot"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c942e64b20ecd39933d5ff938ca4fdb6ef0d298cc3855b231179a5ef0b24948d"
+
[[package]]
name = "fallible-iterator"
version = "0.3.0"
@@ -2119,9 +2134,9 @@ dependencies = [

[[package]]
name = "framehop"
-version = "0.12.1"
+version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fd28d2036d4fd99e3629487baca659e5af1c5d554e320168613be79028610fc"
+checksum = "33e8ad8f843eb89b4ec8270be4d5840dc6b81ca1a1c1e036b17e94076f36eed4"
dependencies = [
 "arrayvec",
 "cfg-if",
@@ -2369,9 +2384,9 @@ dependencies = [

[[package]]
name = "gimli"
-version = "0.30.0"
+version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2e1d97fbe9722ba9bbd0c97051c2956e726562b61f86a25a4360398a40edfc9"
+checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64"
dependencies = [
 "fallible-iterator",
 "stable_deref_trait",
@@ -2649,16 +2664,16 @@ dependencies = [

[[package]]
name = "goblin"
-version = "0.7.999"
+version = "0.8.999"
dependencies = [
- "goblin 0.8.2",
+ "goblin 0.9.2",
]

[[package]]
name = "goblin"
-version = "0.8.2"
+version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1b363a30c165f666402fe6a3024d3bec7ebc898f96a4a23bd1c99f8dbf3f4f47"
+checksum = "53ab3f32d1d77146981dea5d6b1e8fe31eedcb7013e5e00d6ccd1259a4b4d923"
dependencies = [
 "log",
 "plain",
@@ -3900,9 +3915,9 @@ dependencies = [

[[package]]
name = "minidump"
-version = "0.22.1"
+version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cee91aa51259518a08a12c18b5754e45135f89f1d9d7d6aae76ce93b92686698"
+checksum = "e03e301d414a75655d4ce80e6e3690fbfe70814b67c496c64c826ba558d18ec9"
dependencies = [
 "debugid",
 "encoding_rs",
@@ -3945,9 +3960,9 @@ dependencies = [

[[package]]
name = "minidump-common"
-version = "0.22.1"
+version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cd8a9fb054833d2f402e82e256aeef544e595e45fe8fca2de6d03ed605f6647"
+checksum = "5273687f49325b3977f7d372a1bbe2e528694d18128de8dcac78d134448e83b4"
dependencies = [
 "bitflags 2.8.0",
 "debugid",
@@ -3960,9 +3975,9 @@ dependencies = [

[[package]]
name = "minidump-unwind"
-version = "0.22.1"
+version = "0.24.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "efde3c09258c297c0f6761f04d97771ef82a59a6734e7ba0e6e2ef961fb3cbb3"
+checksum = "c30454f5703c77433b4059bf5e196266b800b14223c55793ee636e49c8f9160e"
dependencies = [
 "async-trait",
 "breakpad-symbols",
@@ -3978,15 +3993,17 @@ dependencies = [

[[package]]
name = "minidump-writer"
-version = "0.10.1"
+version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c75ff36a030d76801ed7ec3ea4ae45f12c0f1297f3447790288194274e9aa98"
+checksum = "6e9370e1f326cb4385f78355d8a0f68f429e9002fd3ca53fff9b43fded234473"
dependencies = [
 "bitflags 2.8.0",
 "byteorder",
 "cfg-if",
 "crash-context",
- "goblin 0.8.2",
+ "error-graph",
+ "failspot",
+ "goblin 0.9.2",
 "libc",
 "log",
 "mach2",
@@ -3996,8 +4013,10 @@ dependencies = [
 "nix 0.29.0",
 "procfs-core",
 "scroll",
+ "serde",
+ "serde_json",
 "tempfile",
- "thiserror 1.999.999",
+ "thiserror 2.0.9",
]

[[package]]
@@ -5128,7 +5147,7 @@ dependencies = [
name = "process_reader"
version = "0.1.0"
dependencies = [
- "goblin 0.7.999",
+ "goblin 0.9.2",
 "libc",
 "mach2",
 "memoffset 0.9.0",
@@ -5151,12 +5170,13 @@ dependencies = [

[[package]]
name = "procfs-core"
-version = "0.16.0"
+version = "0.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29"
+checksum = "239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec"
dependencies = [
 "bitflags 2.8.0",
 "hex",
+ "serde",
]

[[package]]
@@ -6855,7 +6875,7 @@ dependencies = [
 "cargo_metadata",
 "fs-err",
 "glob",
- "goblin 0.8.2",
+ "goblin 0.8.999",
 "heck",
 "once_cell",
 "paste",
@@ -1,6 +1,6 @@
[package]
name = "goblin"
-version = "0.7.999"
+version = "0.8.999"
edition = "2018"
license = "MIT OR Apache-2.0"
@@ -8,7 +8,7 @@ license = "MIT OR Apache-2.0"
path = "lib.rs"

[dependencies.goblin]
-version = "0.8.0"
+version = "0.9.0"

default-features = false
@@ -2023,6 +2023,12 @@ who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "0.3.1 -> 0.3.3"

+[[audits.error-graph]]
+who = "Chris Martin <cmartin@mozilla.com>"
+criteria = "safe-to-deploy"
+version = "0.1.1"
+notes = "This code was written and reviewed by Mozilla employees"
+
[[audits.extend]]
who = "Ben Dean-Kawamura <bdk@mozilla.com>"
criteria = "safe-to-deploy"
@@ -2034,6 +2040,12 @@ who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
delta = "1.1.2 -> 1.2.0"

+[[audits.failspot]]
+who = "Chris Martin <cmartin@mozilla.com>"
+criteria = "safe-to-deploy"
+version = "0.2.0"
+notes = "This code was written and reviewed by Mozilla employees"
+
[[audits.fallible_collections]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
@@ -2445,6 +2457,12 @@ Unsafe code blocks are sound. Minimal dependencies used. No use of
side-effectful std functions.
"""

+[[audits.gimli]]
+who = "Chris Martin <cmartin@mozilla.com>"
+criteria = "safe-to-deploy"
+delta = "0.30.0 -> 0.29.0"
+notes = "No unsafe code, mostly algorithms and parsing. Very unlikely to cause security issues."
+
[[audits.gleam]]
who = "Jamie Nicol <jnicol@mozilla.com>"
criteria = "safe-to-deploy"
@@ -2496,6 +2514,12 @@ criteria = "safe-to-deploy"
delta = "0.8.1 -> 0.8.2"
notes = "Removes the TE feature/functionality, otherwise no meaningful changes."

+[[audits.goblin]]
+who = "Chris Martin <cmartin@mozilla.com>"
+criteria = "safe-to-deploy"
+delta = "0.8.2 -> 0.9.2"
+notes = "Doesn't use any unsafe code, mostly parsing and arithmetic."
+
[[audits.gpu-alloc]]
who = "Teodor Tanasoaia <ttanasoaia@mozilla.com>"
criteria = "safe-to-deploy"
@@ -3351,6 +3375,12 @@ criteria = "safe-to-deploy"
delta = "0.8.9 -> 0.10.1"
notes = "Crate written and reviewed by mozilla employees."

+[[audits.minidump-writer]]
+who = "Chris Martin <cmartin@mozilla.com>"
+criteria = "safe-to-deploy"
+delta = "0.10.1 -> 0.10.2"
+notes = "This patch was written and reviewed by Mozilla employees"
+
[[audits.miniz_oxide]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
@@ -4097,6 +4127,12 @@ who = "Gabriele Svelto <gsvelto@mozilla.com>"
criteria = "safe-to-deploy"
delta = "0.16.0-RC1 -> 0.16.0"

+[[audits.procfs-core]]
+who = "Chris Martin <cmartin@mozilla.com>"
+criteria = "safe-to-deploy"
+delta = "0.16.0 -> 0.17.0"
+notes = "Lots of code, but nothing unsafe and mostly parsing various text formats output by /proc files"
+
[[audits.profiling]]
who = "Mike Hommey <mh+mozilla@glandium.org>"
criteria = "safe-to-deploy"
@@ -62,8 +62,8 @@ user-login = "martinthomson"
user-name = "Martin Thomson"

[[publisher.breakpad-symbols]]
-version = "0.22.1"
-when = "2024-09-05"
+version = "0.24.0"
+when = "2025-01-06"
user-id = 72814
user-login = "gabrielesvelto"
user-name = "Gabriele Svelto"
@@ -237,8 +237,8 @@ user-login = "joshtriplett"
user-name = "Josh Triplett"

[[publisher.framehop]]
-version = "0.12.1"
-when = "2024-06-04"
+version = "0.13.0"
+when = "2024-07-24"
user-id = 20227
user-login = "mstange"
user-name = "Markus Stange"
@@ -391,22 +391,22 @@ user-login = "seanmonstar"
user-name = "Sean McArthur"

[[publisher.minidump]]
-version = "0.22.1"
-when = "2024-09-05"
+version = "0.24.0"
+when = "2025-01-06"
user-id = 72814
user-login = "gabrielesvelto"
user-name = "Gabriele Svelto"

[[publisher.minidump-common]]
-version = "0.22.1"
-when = "2024-09-05"
+version = "0.24.0"
+when = "2025-01-06"
user-id = 72814
user-login = "gabrielesvelto"
user-name = "Gabriele Svelto"

[[publisher.minidump-unwind]]
-version = "0.22.1"
-when = "2024-09-05"
+version = "0.24.0"
+when = "2025-01-06"
user-id = 72814
user-login = "gabrielesvelto"
user-name = "Gabriele Svelto"
@@ -1169,6 +1169,12 @@ who = "Pat Hickey <phickey@fastly.com>"
criteria = "safe-to-deploy"
version = "0.3.27"

+[[audits.bytecode-alliance.audits.gimli]]
+who = "Alex Crichton <alex@alexcrichton.com>"
+criteria = "safe-to-deploy"
+delta = "0.29.0 -> 0.31.0"
+notes = "Various updates here and there, nothing too major, what you'd expect from a DWARF parsing crate."
+
[[audits.bytecode-alliance.audits.heck]]
who = "Alex Crichton <alex@alexcrichton.com>"
criteria = "safe-to-deploy"
@@ -1 +1 @@
{"files":{"Cargo.toml":"2dfb12c51f860b95f13b937e550dc7579bfe122854861717b8aed2c25fe51fe3","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"b0b97fcaf1d9eb5a3f3ca1fc0b0b1f593f7a116465ddcb8158541a40ff98660a","src/http.rs":"025a542391b2464fb6bdc769786b7c3d7ab697d932ee198360bc926e5e2b5cb6","src/lib.rs":"dd9a6cf9a140e5132db87e072550afa2418e2bb75cc0c652d929047e69850f6f","src/sym_file/mod.rs":"bb1c42d9b8823eabca753a7eff11533fdf403bcb0e0c91b298fdf07bcfde023e","src/sym_file/parser.rs":"6fbfd6805e8ef2cdadfd6c171d6ad40647a481760e7296f0ac093cb767fdf8dc","src/sym_file/types.rs":"c23a928bf092cbc9302316777ea00e416706bda6879ce7866a118ba18dbb718c","src/sym_file/walker.rs":"05f31914eb04186cdb292d68eb2f5bc5f2be9112e853867e49cc26eee1518a0a"},"package":"6aeaa2a7f839cbb61c2f59ad6e51cc3fd2c24aa2103cb24e6be143bcc114aa24"}
{"files":{"Cargo.toml":"a7debd586eea67b4edf0b792d50d43eb5a13976f83696573252aa127cf495bfd","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"b0b97fcaf1d9eb5a3f3ca1fc0b0b1f593f7a116465ddcb8158541a40ff98660a","src/http.rs":"025a542391b2464fb6bdc769786b7c3d7ab697d932ee198360bc926e5e2b5cb6","src/lib.rs":"9b75eb5e197a9b5cd1a62050d2355a1694688ee29828096c9624638f02045cb4","src/sym_file/mod.rs":"bb1c42d9b8823eabca753a7eff11533fdf403bcb0e0c91b298fdf07bcfde023e","src/sym_file/parser.rs":"6fbfd6805e8ef2cdadfd6c171d6ad40647a481760e7296f0ac093cb767fdf8dc","src/sym_file/types.rs":"c23a928bf092cbc9302316777ea00e416706bda6879ce7866a118ba18dbb718c","src/sym_file/walker.rs":"05f31914eb04186cdb292d68eb2f5bc5f2be9112e853867e49cc26eee1518a0a"},"package":"05cc04995b4f6f26dc9cc5989e93e42c373def047b4b057aaf8f48400b971d1e"}
9  third_party/rust/breakpad-symbols/Cargo.toml  vendored

@@ -12,10 +12,11 @@
[package]
edition = "2018"
name = "breakpad-symbols"
-version = "0.22.1"
+version = "0.24.0"
authors = ["Ted Mielczarek <ted@mielczarek.org>"]
build = false
exclude = ["testdata/*"]
+autolib = false
autobins = false
autoexamples = false
autotests = false
@@ -35,7 +36,7 @@ path = "src/lib.rs"
version = "0.1.52"

[dependencies.cab]
-version = "0.5.0"
+version = "0.6.0"
optional = true

[dependencies.cachemap2]
@@ -51,7 +52,7 @@ version = "0.8.0"
version = "0.3"

[dependencies.minidump-common]
-version = "0.22.1"
+version = "0.24.0"

[dependencies.nom]
version = "7"
@@ -60,7 +61,7 @@ version = "7"
version = "0.2"

[dependencies.reqwest]
-version = "0.11.6"
+version = "0.12"
features = [
    "gzip",
    "rustls-tls",

1  third_party/rust/breakpad-symbols/src/lib.rs  vendored

@@ -715,7 +715,6 @@ impl<T, E> CachedAsyncResult<T, E> {
/// [simple]: struct.SimpleSymbolSupplier.html
/// [get_symbol]: struct.Symbolizer.html#method.get_symbol_at_address
/// [fill_symbol]: struct.Symbolizer.html#method.fill_symbol
pub struct Symbolizer {
    /// Symbol supplier for locating symbols.
    supplier: Box<dyn SymbolSupplier + Send + Sync + 'static>,
1  third_party/rust/error-graph/.cargo-checksum.json  vendored  Normal file
@@ -0,0 +1 @@
{"files":{"Cargo.toml":"bfef4a4465ffe3fb3ca6e123b115e0c77f4b2bb9612935e45b032353f7ff32c6","LICENSE":"30fefc3a7d6a0041541858293bcbea2dde4caa4c0a5802f996a7f7e8c0085652","README.md":"8e867e8b78902382d2f9c91dcb3b4f9e23bd1690f7f6f62c5d02751ac58ec048","src/lib.rs":"bfdeade34ed8d16378fc2e777bc5952c9d453acec0aabdaee634e35801e1afb4","src/strategy.rs":"c6d243c72b8f61ffb9d240f177d10bd9798cdebe5a9004ce48e0defb15b7a991","tests/error_graph.rs":"4db68c13fcb1e7809a72407e225e8b70ff2ce2a602cb7978bf6bde4ef7fa2fd4"},"package":"9b920e777967421aa5f9bf34f842c0ab6ba19b3bdb4a082946093860f5858879"}
52  third_party/rust/error-graph/Cargo.toml  vendored  Normal file

@@ -0,0 +1,52 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2021"
name = "error-graph"
version = "0.1.1"
authors = ["Chris Martin <marti4d@live.ca>"]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Allows non-fatal errors in a tree of subfunctions to easily be collected by a caller"
readme = "README.md"
keywords = [
    "error",
    "error-handling",
]
categories = ["rust-patterns"]
license = "MIT"
repository = "https://github.com/marti4d/error-graph"

[lib]
name = "error_graph"
path = "src/lib.rs"

[[test]]
name = "error_graph"
path = "tests/error_graph.rs"

[dependencies.serde]
version = "1"
optional = true

[dev-dependencies.serde]
version = "1"
features = ["derive"]

[dev-dependencies.serde_json]
version = "1"

[features]
serde = ["dep:serde"]
23  third_party/rust/error-graph/LICENSE  vendored  Normal file

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
145  third_party/rust/error-graph/README.md  vendored  Normal file

@@ -0,0 +1,145 @@
# error-graph

Allows non-fatal errors in a tree of subfunctions to easily be collected by a caller

Provides the `error_graph::ErrorList<E>` type to hold a list of non-fatal errors
that occurred while a function was running.

It has a `subwriter()` method that can be passed as a parameter to
a subfunction and allows that subfunction to record all the non-fatal errors it encounters.
When the subfunction is done running, its error list will be mapped to the caller's error type
and added to the caller's `ErrorList` automatically.

Since subfunctions may in-turn also use the `subwriter()`
function on the writter given to them by their caller, this creates a tree of non-fatal errors
that occurred during the execution of an entire call graph.

# Usage

```
# use error_graph::{ErrorList, WriteErrorList, strategy::{DontCare, ErrorOccurred}};
enum UpperError {
    Upper,
    Middle(ErrorList<MiddleError>),
}
enum MiddleError {
    Middle,
    Lower(ErrorList<LowerError>),
}
enum LowerError {
    Lower,
}
fn upper() {
    let mut errors = ErrorList::default();
    errors.push(UpperError::Upper);
    // Map the ErrorList<MiddleError> to our UpperError::Middle variant
    middle(errors.subwriter(UpperError::Middle));
    errors.push(UpperError::Upper);

    // Some callers just don't want to know if things went wrong or not
    middle(DontCare);

    // Some callers are only interested in whether an error occurred or not
    let mut error_occurred = ErrorOccurred::default();
    middle(&mut error_occurred);
    if error_occurred.as_bool() {
        errors.push(UpperError::Upper);
    }
}
fn middle(mut errors: impl WriteErrorList<MiddleError>) {
    // We can pass a sublist by mutable reference if we need to manipulate it before and after
    let mut sublist = errors.sublist(MiddleError::Lower);
    lower(&mut sublist);
    let num_errors = sublist.len();
    sublist.finish();
    if num_errors > 10 {
        errors.push(MiddleError::Middle);
    }
    // We can pass a reference directly to our error list for peer functions
    middle_2(&mut errors);
}
fn middle_2(mut errors: impl WriteErrorList<MiddleError>) {
    errors.push(MiddleError::Middle);
}
fn lower(mut errors: impl WriteErrorList<LowerError>) {
    errors.push(LowerError::Lower);
}
```

# Motivation

In most call graphs, a function that encounters an error will early-return and pass an
error type to its caller. The caller will often respond by passing that error further up the
call stack up to its own caller (possibly after wrapping it in its own error type). That
continues so-on-and-so-forth until some caller finally handles the error, returns from `main`,
or panics. Ultimately, the result is that some interested caller will receive a linear chain of
errors that led to the failure.

But, not all errors are fatal -- Sometimes, a function might be able to continue working after
it encounters an error and still be able to at-least-partially achieve its goals. Calling it
again - or calling other functions in the same API - is still permissible and may also result
in full or partial functionality.

In that case, the function may still choose to return `Result::Ok`; however, that leaves the
function with a dilemma -- How can it report the non-fatal errors to the caller?

1. **Return a tuple in its `Result::Ok` type**: that wouldn't capture the non-fatal errors in
   the case that a fatal error occurs, so it would also have to be added to the `Result::Err`
   type as well.

   That adds a bunch of boilerplate, as the function needs to allocate the list and map it
   into the return type for every error return and good return. It also makes the function
   signature much more noisy.

2. **Take a list as a mutable reference?**: Better, but now the caller has to allocate the
   list, and there's no way for it to opt out if it doesn't care about the non-fatal errors.

3. **Maybe add an `Option` to it?** Okay, so a parameter like `errors: Option<&mut Vec<E>>`?
   Getting warmer, but now the child has to do a bunch of
   `if let Some(v) = errors { v.push(error); }` all over the place.

And what about the caller side of it? For a simple caller, the last point isn't too bad: The
caller just has to allocate the list, pass `Some(&mut errors)` to the child, and check it upon
return.

But often, the caller itself is keeping its own list of non-fatal errors and may also be a
subfunction to some other caller, and so-on-and-so-forth. In this case, we no longer have
a simple chain of errors, but instead we have a tree of errors -- Each level in the tree
contains all the non-fatal errors that occurred during execution of a function and all
subfunctions in its call graph.

# Solution

The main behavior we want is captured by the `WriteErrorList` trait in this crate. It can be
passed as a parameter to any function that wants to be able to report non-fatal errors to its
caller, and it gives the caller flexibility to decide what it wants to do with that
information.

The main concrete type in this crate is `ErrorList`, which stores a list of a single type of
error. Any time a list of errors needs to be stored in memory, this is the type to use. It will
usually be created by the top-level caller using `ErrorList::default`, and any subfunction will
give an `ErrorList` of its own error type to the `map_fn` that was passed in by its caller upon
return.

However, `ErrorList` should rarely be passed as a parameter to a function, as that wouldn't
provide the caller with the flexiblity to decide what strategy it actually wants
to use when collecting its subfunction's non-fatal errors. The caller may want to pass direct
reference to its own error list, it may want to pass a `Sublist` type that automatically
pushes the subfunction's error list to its own error list after mapping, or it may want to
pass the `DontCare` type if it doesn't want to know anything about the
subfunction's non-fatal errors.

Instead, subfunctions should take `impl WriteErrorList<E>` as a parameter.
This allows any of those types above, as well as mutable references to those types, to be
passed in by the caller. This also allows future caller strategies to be implemented, like
a caller that only cares how many non-fatal errors occurred but doesn't care about the details.

# Serde

(This section only applies if the `serde` feature is enabled)

`ErrorList` implements the `Serialize` trait if the errors it contains do, and
likewise with the `Deserialize` trait. This means that if every error type in the tree
implements these traits then the entire tree can be sent over the wire and recreated elsewhere.
Very useful if the errors are to be examined remotely!
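As an editorial illustration of the Serde section above (not part of the vendored crate), the following minimal sketch builds a small error tree with the API shown in this README and serializes it with `serde_json`. It assumes the `serde` feature of error-graph is enabled and that `serde` (with the `derive` feature) and `serde_json` are available, mirroring what tests/error_graph.rs below does.

```rust
use error_graph::{ErrorList, WriteErrorList};
use serde::Serialize;

// Hypothetical error types used only for this sketch.
#[derive(Debug, Serialize)]
enum UpperError {
    Upper,
    Middle(ErrorList<MiddleError>),
}

#[derive(Debug, Serialize)]
enum MiddleError {
    Middle,
}

fn main() {
    // Collect a small tree of non-fatal errors.
    let mut errors: ErrorList<UpperError> = ErrorList::default();
    errors.push(UpperError::Upper);

    // The sublist maps its MiddleError entries into UpperError::Middle on finish().
    let mut middle = errors.sublist(UpperError::Middle);
    middle.push(MiddleError::Middle);
    middle.finish();

    // ErrorList<E> serializes as a plain JSON array of its errors.
    let json = serde_json::to_string_pretty(&errors).expect("serialization failed");
    println!("{json}");
}
```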
386  third_party/rust/error-graph/src/lib.rs  vendored  Normal file
@@ -0,0 +1,386 @@
|
||||
//! Allows non-fatal errors in a tree of subfunctions to easily be collected by a caller
|
||||
//!
|
||||
//! Provides the [`error_graph::ErrorList<E>`][ErrorList] type to hold a list of non-fatal errors
|
||||
//! that occurred while a function was running.
|
||||
//!
|
||||
//! It has a [`subwriter()`][WriteErrorList::subwriter] method that can be passed as a parameter to
|
||||
//! a subfunction and allows that subfunction to record all the non-fatal errors it encounters.
|
||||
//! When the subfunction is done running, its error list will be mapped to the caller's error type
|
||||
//! and added to the caller's [ErrorList] automatically.
|
||||
//!
|
||||
//! Since subfunctions may in-turn also use the [`subwriter()`][WriteErrorList::subwriter]
|
||||
//! function on the writter given to them by their caller, this creates a tree of non-fatal errors
|
||||
//! that occurred during the execution of an entire call graph.
|
||||
//!
|
||||
//! # Usage
|
||||
//!
|
||||
//! ```
|
||||
//! # use error_graph::{ErrorList, WriteErrorList, strategy::{DontCare, ErrorOccurred}};
|
||||
//! enum UpperError {
|
||||
//! Upper,
|
||||
//! Middle(ErrorList<MiddleError>),
|
||||
//! }
|
||||
//! enum MiddleError {
|
||||
//! Middle,
|
||||
//! Lower(ErrorList<LowerError>),
|
||||
//! }
|
||||
//! enum LowerError {
|
||||
//! Lower,
|
||||
//! }
|
||||
//! fn upper() {
|
||||
//! let mut errors = ErrorList::default();
|
||||
//! errors.push(UpperError::Upper);
|
||||
//! // Map the ErrorList<MiddleError> to our UpperError::Middle variant
|
||||
//! middle(errors.subwriter(UpperError::Middle));
|
||||
//! errors.push(UpperError::Upper);
|
||||
//!
|
||||
//! // Some callers just don't want to know if things went wrong or not
|
||||
//! middle(DontCare);
|
||||
//!
|
||||
//! // Some callers are only interested in whether an error occurred or not
|
||||
//! let mut error_occurred = ErrorOccurred::default();
|
||||
//! middle(&mut error_occurred);
|
||||
//! if error_occurred.as_bool() {
|
||||
//! errors.push(UpperError::Upper);
|
||||
//! }
|
||||
//! }
|
||||
//! fn middle(mut errors: impl WriteErrorList<MiddleError>) {
|
||||
//! // We can pass a sublist by mutable reference if we need to manipulate it before and after
|
||||
//! let mut sublist = errors.sublist(MiddleError::Lower);
|
||||
//! lower(&mut sublist);
|
||||
//! let num_errors = sublist.len();
|
||||
//! sublist.finish();
|
||||
//! if num_errors > 10 {
|
||||
//! errors.push(MiddleError::Middle);
|
||||
//! }
|
||||
//! // We can pass a reference directly to our error list for peer functions
|
||||
//! middle_2(&mut errors);
|
||||
//! }
|
||||
//! fn middle_2(mut errors: impl WriteErrorList<MiddleError>) {
|
||||
//! errors.push(MiddleError::Middle);
|
||||
//! }
|
||||
//! fn lower(mut errors: impl WriteErrorList<LowerError>) {
|
||||
//! errors.push(LowerError::Lower);
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! # Motivation
|
||||
//!
|
||||
//! In most call graphs, a function that encounters an error will early-return and pass an
|
||||
//! error type to its caller. The caller will often respond by passing that error further up the
|
||||
//! call stack up to its own caller (possibly after wrapping it in its own error type). That
|
||||
//! continues so-on-and-so-forth until some caller finally handles the error, returns from `main`,
|
||||
//! or panics. Ultimately, the result is that some interested caller will receive a linear chain of
|
||||
//! errors that led to the failure.
|
||||
//!
|
||||
//! But, not all errors are fatal -- Sometimes, a function might be able to continue working after
|
||||
//! it encounters an error and still be able to at-least-partially achieve its goals. Calling it
|
||||
//! again - or calling other functions in the same API - is still permissible and may also result
|
||||
//! in full or partial functionality.
|
||||
//!
|
||||
//! In that case, the function may still choose to return `Result::Ok`; however, that leaves the
|
||||
//! function with a dilemma -- How can it report the non-fatal errors to the caller?
|
||||
//!
|
||||
//! 1. **Return a tuple in its `Result::Ok` type**: that wouldn't capture the non-fatal errors in
|
||||
//! the case that a fatal error occurs, so it would also have to be added to the `Result::Err`
|
||||
//! type as well.
|
||||
//!
|
||||
//! That adds a bunch of boilerplate, as the function needs to allocate the list and map it
|
||||
//! into the return type for every error return and good return. It also makes the function
|
||||
//! signature much more noisy.
|
||||
//!
|
||||
//! 2. **Take a list as a mutable reference?**: Better, but now the caller has to allocate the
|
||||
//! list, and there's no way for it to opt out if it doesn't care about the non-fatal errors.
|
||||
//!
|
||||
//! 3. **Maybe add an `Option` to it?** Okay, so a parameter like `errors: Option<&mut Vec<E>>`?
|
||||
//! Getting warmer, but now the child has to do a bunch of
|
||||
//! `if let Some(v) = errors { v.push(error); }` all over the place.
|
||||
//!
|
||||
//! And what about the caller side of it? For a simple caller, the last point isn't too bad: The
|
||||
//! caller just has to allocate the list, pass `Some(&mut errors)` to the child, and check it upon
|
||||
//! return.
|
||||
//!
|
||||
//! But often, the caller itself is keeping its own list of non-fatal errors and may also be a
|
||||
//! subfunction to some other caller, and so-on-and-so-forth. In this case, we no longer have
|
||||
//! a simple chain of errors, but instead we have a tree of errors -- Each level in the tree
|
||||
//! contains all the non-fatal errors that occurred during execution of a function and all
|
||||
//! subfunctions in its call graph.
|
||||
//!
|
||||
//! # Solution
|
||||
//!
|
||||
//! The main behavior we want is captured by the [WriteErrorList] trait in this crate. It can be
|
||||
//! passed as a parameter to any function that wants to be able to report non-fatal errors to its
|
||||
//! caller, and it gives the caller flexibility to decide what it wants to do with that
|
||||
//! information.
|
||||
//!
|
||||
//! The main concrete type in this crate is [ErrorList], which stores a list of a single type of
|
||||
//! error. Any time a list of errors needs to be stored in memory, this is the type to use. It will
|
||||
//! usually be created by the top-level caller using [ErrorList::default], and any subfunction will
|
||||
//! give an [ErrorList] of its own error type to the `map_fn` that was passed in by its caller upon
|
||||
//! return.
|
||||
//!
|
||||
//! However, [ErrorList] should rarely be passed as a parameter to a function, as that wouldn't
|
||||
//! provide the caller with the flexiblity to decide what strategy it actually wants
|
||||
//! to use when collecting its subfunction's non-fatal errors. The caller may want to pass direct
|
||||
//! reference to its own error list, it may want to pass a [Sublist] type that automatically
|
||||
//! pushes the subfunction's error list to its own error list after mapping, or it may want to
|
||||
//! pass the [DontCare] type if it doesn't want to know anything about the
|
||||
//! subfunction's non-fatal errors.
|
||||
//!
|
||||
//! Instead, subfunctions should take `impl WriteErrorList<E>` as a parameter.
|
||||
//! This allows any of those types above, as well as mutable references to those types, to be
|
||||
//! passed in by the caller. This also allows future caller strategies to be implemented, like
|
||||
//! a caller that only cares how many non-fatal errors occurred but doesn't care about the details.
|
||||
//!
|
||||
//! # Serde
|
||||
//!
|
||||
//! (This section only applies if the `serde` feature is enabled)
|
||||
//!
|
||||
//! [ErrorList] implements the `Serialize` trait if the errors it contains do, and
|
||||
//! likewise with the `Deserialize` trait. This means that if every error type in the tree
|
||||
//! implements these traits then the entire tree can be sent over the wire and recreated elsewhere.
|
||||
//! Very useful if the errors are to be examined remotely!
|
||||
|
||||
use {
|
||||
std::{
|
||||
error::Error,
|
||||
fmt::{self, Debug, Display, Formatter},
|
||||
},
|
||||
strategy::*,
|
||||
};
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
pub mod strategy;
|
||||
|
||||
/// Types that are capable of having errors and sublists of errors pushed onto them
|
||||
///
|
||||
/// This is the main trait that allows a function to record a list of non-fatal errors it
|
||||
/// encounters during its execution. Generally, the [WriteErrorList::push] method will be used when
|
||||
/// such an error occurs to record the error and any relevant information.
|
||||
///
|
||||
/// Often, a function will want to call a subfunction and add any non-fatal errors encountered
|
||||
/// to its own list of errors. There are 2 strategies it could use:
|
||||
///
|
||||
/// 1. **Let the subfunction directly push onto its error list** For functions that are at the same
|
||||
/// level of abstraction and use the same error type, it might make the most sense for them
|
||||
/// to just share an error list. In this case, simply pass a mutable reference to the error
|
||||
/// list. For any type that implements this trait, a mutable reference to it implements the
|
||||
/// trait too. This allows a single function to be a composition of a bunch of functions that
|
||||
/// each share a single flat error list.
|
||||
///
|
||||
/// 2. **Map the subfunction's error list to the caller** For a subfunction that is at a different
|
||||
/// level of abstraction than the caller and uses its own error type, this makes the most sense;
|
||||
/// consume the subfunction's entire error list and store it as a single error of the
|
||||
/// caller's higher-level error type. Of course, those subfunctions may implement this same
|
||||
/// strategy for subfunctions they call, creating a hierarchy of errors.
|
||||
///
|
||||
/// In this case, call the [WriteErrorList::subwriter()] function as a parameter to
|
||||
/// the subfunction. If you need to manipulate the list after the subfunction has returned,
|
||||
/// instead call [WriteErrorList::sublist] and pass a mutable reference as a parameter.
|
||||
///
|
||||
/// Function parameters should always prefer to take an object by this trait, and should rarely
|
||||
/// take parameters as concrete types like [ErrorList] or [Sublist].
|
||||
/// Doing so would prevent callers from being able to decide what strategy they want to use
|
||||
/// to merge the subfunction's errors with its own, and would also prevent them from using the
|
||||
/// [DontCare] call if they want to opt out of receiving non-fatal error information.
|
||||
///
|
||||
/// Passing by this trait may also help prevent logic errors: Directly passing a [Sublist] allows
|
||||
/// the subfunction to query the contents of the list it's passed. Functions may incorrectly rely on
|
||||
/// the fact that they are always passed an empty list, and will suddenly break if that assumption
|
||||
/// doesn't hold.
|
||||
pub trait WriteErrorList<E>: Sized + private::Sealed<E> {
|
||||
/// Add an error to the list of errors
|
||||
fn push(&mut self, error: E);
|
||||
/// Create a new mapping error writer with this as its parent
|
||||
///
|
||||
/// Creates a error writer for use by a subfunction. When the subfunction is finished,
|
||||
/// either by explicitly calling [WriteErrorList::finish] or by letting it drop, the list
|
||||
/// of errors it has written using [WriteErrorList::push] will be passed as an
|
||||
/// [`ErrorList<SubErr>`][ErrorList] to the given `map_fn`, which is expected to map it to
|
||||
/// our error type, `E`.
|
||||
///
|
||||
/// Use of this function should always be preferred to [WriteErrorList::sublist] when the
|
||||
/// caller does not need to inspect or manipulate the list returned by the subfunction and
|
||||
/// simply wants to pass it upward to its own caller, as this function will pass forward
|
||||
/// alternate strategies for collecting the errors, like [DontCare] (which turns
|
||||
/// [WriteErrorList::push] into a no-op). In constrast, [WriteErrorList::sublist] actually
|
||||
/// materializes a list that will collect all the errors of all the lists below it, even
|
||||
/// if the caller above it passed in a [DontCare].
|
||||
fn subwriter<'sub, SubMapFn, SubErr: 'sub>(
|
||||
&'sub mut self,
|
||||
map_fn: SubMapFn,
|
||||
) -> impl WriteErrorList<SubErr> + 'sub
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E + 'sub;
|
||||
/// Start a new error list with this error list as its parent
|
||||
///
|
||||
/// This works in a very similar manner to [WriteErrorList::subwriter], but it materializes
|
||||
/// an actual concrete [Sublist] type. This function
|
||||
/// should only be used if the function needs to be able to inspect or manipulate the errors
|
||||
/// returned by the subfunction, as it always collects all errors written by the subfunction's
|
||||
/// call graph. Otherwise, [WriteErrorList::subwriter] should be used.
|
||||
fn sublist<SubMapFn, SubErr>(
|
||||
&mut self,
|
||||
map_fn: SubMapFn,
|
||||
) -> Sublist<'_, SubErr, SubMapFn, Self, E>
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E,
|
||||
{
|
||||
Sublist::new(map_fn, self)
|
||||
}
|
||||
/// Finish this error list
|
||||
///
|
||||
/// This doesn't normally need to be called, as the [Drop] implementation will take care of
|
||||
/// all the details of cleaning up and ensuring that sublists are mapped up to their parent.
|
||||
///
|
||||
/// This is mostly useful when a caller maintains a binding to a subfunction's error list
|
||||
/// and passes it by mutable reference instead of by value. Before the caller can continue
|
||||
/// to use its own error list, the sublist must release its exclusive reference.
|
||||
///
|
||||
/// This function simply calls [drop()], but it's just a bit more clear about the intent.
|
||||
fn finish(self) {
|
||||
drop(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E, T: WriteErrorList<E>> private::Sealed<E> for &mut T {}
|
||||
|
||||
impl<E, T: WriteErrorList<E>> WriteErrorList<E> for &mut T {
|
||||
fn push(&mut self, error: E) {
|
||||
WriteErrorList::push(*self, error)
|
||||
}
|
||||
fn subwriter<'sub, SubMapFn, SubErr: 'sub>(
|
||||
&'sub mut self,
|
||||
map_fn: SubMapFn,
|
||||
) -> impl WriteErrorList<SubErr> + 'sub
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E + 'sub,
|
||||
{
|
||||
WriteErrorList::subwriter(*self, map_fn)
|
||||
}
|
||||
}
|
||||
|
||||
/// The main type that holds a list of errors.
|
||||
///
|
||||
/// See the module-level docs and the docs for [WriteErrorList].
|
||||
#[derive(Debug, Eq, Hash, PartialEq)]
|
||||
pub struct ErrorList<E> {
|
||||
errors: Vec<E>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
impl<E: Serialize> Serialize for ErrorList<E> {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
Serialize::serialize(&self.errors, serializer)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
impl<'de, E: Deserialize<'de>> Deserialize<'de> for ErrorList<E> {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
Ok(ErrorList {
|
||||
errors: Deserialize::deserialize(deserializer)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> ErrorList<E> {
|
||||
/// Returns whether the error list is empty
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.errors.is_empty()
|
||||
}
|
||||
/// Return the length of the error list
|
||||
pub fn len(&self) -> usize {
|
||||
self.errors.len()
|
||||
}
|
||||
/// Iterate the error list, returning immutable references
|
||||
pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a E>
|
||||
where
|
||||
E: 'a,
|
||||
{
|
||||
self.errors.iter()
|
||||
}
|
||||
/// Iterate the error list, returning mutable references
|
||||
pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut E>
|
||||
where
|
||||
E: 'a,
|
||||
{
|
||||
self.errors.iter_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> private::Sealed<E> for ErrorList<E> {}
|
||||
|
||||
impl<E> WriteErrorList<E> for ErrorList<E> {
|
||||
fn push(&mut self, error: E) {
|
||||
self.errors.push(error);
|
||||
}
|
||||
fn subwriter<'sub, SubMapFn, SubErr: 'sub>(
|
||||
&'sub mut self,
|
||||
map_fn: SubMapFn,
|
||||
) -> impl WriteErrorList<SubErr> + 'sub
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E + 'sub,
|
||||
{
|
||||
self.sublist(map_fn)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> Default for ErrorList<E> {
|
||||
fn default() -> Self {
|
||||
Self { errors: Vec::new() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: Error> Display for ErrorList<E> {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
|
||||
writeln!(f, "one or more errors occurred:")?;
|
||||
writeln!(f)?;
|
||||
for (i, e) in self.errors.iter().enumerate() {
|
||||
writeln!(f, " {i}:")?;
|
||||
|
||||
for line in e.to_string().lines() {
|
||||
writeln!(f, " {line}")?;
|
||||
}
|
||||
|
||||
writeln!(f)?;
|
||||
|
||||
let mut source = e.source();
|
||||
while let Some(e) = source {
|
||||
writeln!(f, " caused by:")?;
|
||||
|
||||
for line in e.to_string().lines() {
|
||||
writeln!(f, " {line}")?;
|
||||
}
|
||||
|
||||
writeln!(f)?;
|
||||
|
||||
source = e.source();
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: Error> Error for ErrorList<E> {}
|
||||
|
||||
impl<E> IntoIterator for ErrorList<E> {
|
||||
type Item = <Vec<E> as IntoIterator>::Item;
|
||||
type IntoIter = <Vec<E> as IntoIterator>::IntoIter;
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.errors.into_iter()
|
||||
}
|
||||
}
|
||||
|
||||
mod private {
|
||||
/// Prevent users of this crate from implementing traits for their own types
|
||||
pub trait Sealed<E> {}
|
||||
}
|
||||
166  third_party/rust/error-graph/src/strategy.rs  vendored  Normal file
@@ -0,0 +1,166 @@
|
||||
//! Strategies a caller may use to collect errors from subfunctions
|
||||
//!
|
||||
//! Currently, the strategies contained in this module are:
|
||||
//!
|
||||
//! - [DontCare]: The caller will ignore any non-fatal errors in subfunction.
|
||||
//! [WriteErrorList::push] is effectively a no-op.
|
||||
//!
|
||||
//! - [ErrorOccurred]: Keeps track of a single boolean about whether an error occurred or not.
|
||||
//! [WriteErrorList::push] essentially just sets a flag.
|
||||
//!
|
||||
//! - [Sublist]: A full-fledged list of all non-fatal errors in subfunction. Will be mapped to
|
||||
//! the caller's error type with a map function and pushed into the caller's error list.
|
||||
use {
|
||||
crate::{private, ErrorList, WriteErrorList},
|
||||
std::ops::{Deref, DerefMut},
|
||||
};
|
||||
|
||||
/// A sublist that maps a list of errors into a parent error type
|
||||
///
|
||||
/// When an object of this type is dropped, it will call the given `MapFn` object with a
|
||||
/// [`ErrorList<E>`][ErrorList] containing all the errors that were pushed into it. The map
|
||||
/// function will be used to map that error list to a single `ParentErr` object which will then
|
||||
/// be pushed onto the parent's error list.
|
||||
///
|
||||
/// This type implements [DerefMut] to an [ErrorList], so it can basically be thought of as
|
||||
/// an [ErrorList] with a fancy destructor.
|
||||
pub struct Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
list: ErrorList<E>,
|
||||
map_fn_and_parent: Option<(MapFn, &'a mut Parent)>,
|
||||
}
|
||||
|
||||
impl<'a, E, MapFn, Parent, ParentErr> Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
/// Create a new sublist that maps a list of subfunction errors to the parent error
|
||||
///
|
||||
/// `map_fn` is a function that accepts an `ErrorList<E>` and returns a `ParentErr`, which
|
||||
/// is then pushed into the parent's error list.
|
||||
///
|
||||
/// It is recommended use [WriteErrorList::sublist] instead of this.
|
||||
pub fn new(map_fn: MapFn, parent: &'a mut Parent) -> Self {
|
||||
Self {
|
||||
list: ErrorList::default(),
|
||||
map_fn_and_parent: Some((map_fn, parent)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E, MapFn, Parent, ParentErr> Drop for Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
if !self.list.is_empty() {
|
||||
let list = std::mem::take(&mut self.list);
|
||||
let (map_fn, parent) = self.map_fn_and_parent.take().unwrap();
|
||||
let parent_error = map_fn(list);
|
||||
parent.push(parent_error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E, MapFn, Parent, ParentErr> Deref for Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
type Target = ErrorList<E>;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.list
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E, MapFn, Parent, ParentErr> DerefMut for Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.list
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E, MapFn, Parent, ParentErr> private::Sealed<E>
|
||||
for Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
}
|
||||
|
||||
impl<'a, E, MapFn, Parent, ParentErr> WriteErrorList<E> for Sublist<'a, E, MapFn, Parent, ParentErr>
|
||||
where
|
||||
MapFn: FnOnce(ErrorList<E>) -> ParentErr,
|
||||
Parent: WriteErrorList<ParentErr>,
|
||||
{
|
||||
fn push(&mut self, error: E) {
|
||||
self.list.push(error)
|
||||
}
|
||||
fn subwriter<'sub, SubMapFn, SubErr: 'sub>(
|
||||
&'sub mut self,
|
||||
map_fn: SubMapFn,
|
||||
) -> impl WriteErrorList<SubErr> + 'sub
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E + 'sub,
|
||||
{
|
||||
self.sublist(map_fn)
|
||||
}
|
||||
}
|
||||
|
||||
/// An error list writer that ignores errors
|
||||
///
|
||||
/// Any call to [WriteErrorList::push] does nothing but drop the given error.
|
||||
pub struct DontCare;
|
||||
|
||||
impl<E> private::Sealed<E> for DontCare {}
|
||||
|
||||
impl<E> WriteErrorList<E> for DontCare {
|
||||
fn push(&mut self, _error: E) {}
|
||||
fn subwriter<'sub, SubMapFn, SubErr: 'sub>(
|
||||
&'sub mut self,
|
||||
_map_fn: SubMapFn,
|
||||
) -> impl WriteErrorList<SubErr> + 'sub
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E + 'sub,
|
||||
{
|
||||
DontCare
|
||||
}
|
||||
}
|
||||
|
||||
/// An error list writer that only notes that an error occurred
|
||||
///
|
||||
/// [ErrorOccurred::as_bool] will return `true` if the subfunction encountered a non-fatal error
|
||||
/// `false` otherwise
|
||||
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq)]
|
||||
pub struct ErrorOccurred(bool);
|
||||
|
||||
impl ErrorOccurred {
|
||||
pub fn as_bool(&self) -> bool {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> private::Sealed<E> for ErrorOccurred {}
|
||||
|
||||
impl<E> WriteErrorList<E> for ErrorOccurred {
|
||||
fn push(&mut self, _error: E) {
|
||||
self.0 = true;
|
||||
}
|
||||
fn subwriter<'sub, SubMapFn, SubErr: 'sub>(
|
||||
&'sub mut self,
|
||||
_map_fn: SubMapFn,
|
||||
) -> impl WriteErrorList<SubErr> + 'sub
|
||||
where
|
||||
SubMapFn: FnOnce(ErrorList<SubErr>) -> E + 'sub,
|
||||
{
|
||||
self
|
||||
}
|
||||
}
|
||||
147  third_party/rust/error-graph/tests/error_graph.rs  vendored  Normal file
@@ -0,0 +1,147 @@
|
||||
use error_graph::{ErrorList, WriteErrorList};
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
enum UpperError {
|
||||
Upper,
|
||||
Middle(ErrorList<MiddleError>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
enum MiddleError {
|
||||
Middle,
|
||||
Lower(ErrorList<LowerError>),
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
enum LowerError {
|
||||
Lower,
|
||||
}
|
||||
|
||||
fn upper(mut errors: impl WriteErrorList<UpperError>) {
|
||||
errors.push(UpperError::Upper);
|
||||
middle(errors.subwriter(UpperError::Middle));
|
||||
errors.push(UpperError::Upper);
|
||||
}
|
||||
|
||||
fn middle(mut errors: impl WriteErrorList<MiddleError>) {
|
||||
errors.push(MiddleError::Middle);
|
||||
lower(errors.subwriter(MiddleError::Lower));
|
||||
errors.push(MiddleError::Middle);
|
||||
}
|
||||
|
||||
fn lower(mut errors: impl WriteErrorList<LowerError>) {
|
||||
errors.push(LowerError::Lower);
|
||||
}
|
||||
|
||||
fn lower_errors_only(mut errors: impl WriteErrorList<MiddleError>) {
|
||||
lower(errors.subwriter(MiddleError::Lower));
|
||||
}
|
||||
|
||||
fn no_errors(mut errors: impl WriteErrorList<UpperError>) {
|
||||
no_errors_middle(errors.subwriter(UpperError::Middle));
|
||||
}
|
||||
|
||||
fn no_errors_middle(mut errors: impl WriteErrorList<MiddleError>) {
|
||||
no_errors_lower(errors.subwriter(MiddleError::Lower));
|
||||
}
|
||||
|
||||
fn no_errors_lower(mut _errors: impl WriteErrorList<LowerError>) {}
|
||||
|
||||
#[test]
|
||||
fn empty() {
|
||||
let mut errors = ErrorList::default();
|
||||
errors.push(UpperError::Upper);
|
||||
no_errors_middle(errors.subwriter(UpperError::Middle));
|
||||
assert_eq!(errors.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic() {
|
||||
let mut errors = ErrorList::default();
|
||||
upper(&mut errors);
|
||||
|
||||
let mut upper_it = errors.into_iter();
|
||||
assert!(matches!(upper_it.next(), Some(UpperError::Upper)));
|
||||
let Some(UpperError::Middle(middle)) = upper_it.next() else {
|
||||
panic!();
|
||||
};
|
||||
assert!(matches!(upper_it.next(), Some(UpperError::Upper)));
|
||||
|
||||
let mut middle_it = middle.into_iter();
|
||||
assert!(matches!(middle_it.next(), Some(MiddleError::Middle)));
|
||||
let Some(MiddleError::Lower(lower)) = middle_it.next() else {
|
||||
panic!();
|
||||
};
|
||||
assert!(matches!(middle_it.next(), Some(MiddleError::Middle)));
|
||||
|
||||
let mut lower_it = lower.into_iter();
|
||||
assert!(matches!(lower_it.next(), Some(LowerError::Lower)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn sublist() {
|
||||
let mut errors = ErrorList::default();
|
||||
errors.push(UpperError::Upper);
|
||||
let mut sublist = errors.sublist(UpperError::Middle);
|
||||
middle(&mut sublist);
|
||||
assert_eq!(sublist.len(), 3);
|
||||
drop(sublist);
|
||||
assert_eq!(errors.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dont_care() {
|
||||
let mut errors = error_graph::strategy::DontCare;
|
||||
errors.push(UpperError::Upper);
|
||||
let mut sublist = errors.sublist(UpperError::Middle);
|
||||
middle(&mut sublist);
|
||||
assert_eq!(sublist.len(), 3);
|
||||
sublist.finish();
|
||||
middle(errors.subwriter(UpperError::Middle));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_occurred() {
|
||||
let mut error_occurred = error_graph::strategy::ErrorOccurred::default();
|
||||
no_errors(&mut error_occurred);
|
||||
assert!(!error_occurred.as_bool());
|
||||
lower_errors_only(&mut error_occurred);
|
||||
assert!(error_occurred.as_bool());
|
||||
let mut error_occurred = error_graph::strategy::ErrorOccurred::default();
|
||||
middle(&mut error_occurred);
|
||||
assert!(error_occurred.as_bool());
|
||||
}
|
||||
|
||||
#[cfg(feature = "serde")]
|
||||
#[test]
|
||||
fn serde() {
|
||||
const EXPECTED_JSON: &str = r#"[
|
||||
"Upper",
|
||||
{
|
||||
"Middle": [
|
||||
"Middle",
|
||||
{
|
||||
"Lower": [
|
||||
"Lower"
|
||||
]
|
||||
},
|
||||
"Middle"
|
||||
]
|
||||
},
|
||||
"Upper"
|
||||
]"#;
|
||||
|
||||
let mut errors = ErrorList::default();
|
||||
upper(&mut errors);
|
||||
let json = serde_json::to_string_pretty(&errors).unwrap();
|
||||
assert_eq!(json.as_str(), EXPECTED_JSON);
|
||||
|
||||
let recreated_errors: ErrorList<UpperError> = serde_json::from_str(&json).unwrap();
|
||||
|
||||
for (left, right) in errors.iter().zip(recreated_errors.iter()) {
|
||||
assert_eq!(left, right);
|
||||
}
|
||||
}
|
||||
1  third_party/rust/failspot/.cargo-checksum.json  vendored  Normal file
@@ -0,0 +1 @@
{"files":{"Cargo.toml":"050c2c16aa21b10b00a5e5943d22bcf4adbe79fc3c728a188f2558f456a54ff8","LICENSE":"748c15c2a285ef74aec280f2e72bd98482b96bd0e2da0d8f0fde934fa88e2e16","README.md":"4574640bf1b6b6f24b9b70a159d2e291d2dc89621bd4ec378d7ec7ab69fde597","src/lib.rs":"c8a28569145d36e3097407b8c2bef6e96e2d3196f2efb5304055ce2ceafb93ec","src/testing.rs":"abc7299e60c076de162dd5da66fc958780d4a30278db911a54b7867a34b25c1c","tests/failspot.rs":"e7793c44c7113a51312c67d02b1a174c84f5543385fdb07f0e671f27851ea0b2"},"package":"c942e64b20ecd39933d5ff938ca4fdb6ef0d298cc3855b231179a5ef0b24948d"}
46  third_party/rust/failspot/Cargo.toml  vendored  Normal file

@@ -0,0 +1,46 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2021"
name = "failspot"
version = "0.2.0"
authors = ["Chris Martin <marti4d@live.ca>"]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "A testing library that makes it easy(ish) to add intentional errors to a program"
readme = "README.md"
keywords = [
    "error",
    "error-handling",
    "testing",
]
categories = ["development-tools::testing"]
license = "MIT"
repository = "https://github.com/marti4d/failspot"

[lib]
name = "failspot"
path = "src/lib.rs"

[[test]]
name = "failspot"
path = "tests/failspot.rs"

[dependencies.flagset]
version = "0.4.6"
optional = true

[features]
enabled = ["dep:flagset"]
5
third_party/rust/failspot/LICENSE
vendored
Normal file
5
third_party/rust/failspot/LICENSE
vendored
Normal file
@@ -0,0 +1,5 @@
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
11
third_party/rust/failspot/README.md
vendored
Normal file
11
third_party/rust/failspot/README.md
vendored
Normal file
@@ -0,0 +1,11 @@
# failspot

[](https://crates.io/crates/failspot)
[](https://docs.rs/failspot)


A testing library that makes it easy(ish) to add intentional errors to a program

When testing error-handling codepaths, it is often useful to programmatically tell parts of the code to fail. This
crate provides the `failspot!()` macro, which can be used to mark a spot in the codepath where an intentional failure
can be toggled on and off from testing code.
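The README above, taken together with the sources vendored below, suggests how the pieces fit together. The following is a minimal sketch, not part of the vendored files: `read_data` and the `FailDataRead` spot are invented for illustration, and it assumes failspot's `enabled` feature is turned on when the tests are built.

```rust
use failspot::{failspot, failspot_name};

// A failspot name reachable at the crate root, so the short-form macro syntax works.
failspot_name! {
    pub enum FailSpotName {
        FailDataRead,
    }
}

fn read_data() -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    // Returns Err early when the failspot is toggled on from testing code;
    // with the `enabled` feature off this line compiles away to an empty block.
    failspot!(FailDataRead bail(std::io::Error::other("injected failure")));
    Ok(vec![1, 2, 3])
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn read_fails_when_toggled() {
        // The client grabs a mutex and resets every failspot when it drops.
        let mut client = FailSpotName::testing_client();
        client.set_enabled(FailSpotName::FailDataRead, true);
        assert!(read_data().is_err());
    }
}
```

The crate's own integration tests in `tests/failspot.rs`, vendored further below, exercise the same pattern across several spots at once.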
327
third_party/rust/failspot/src/lib.rs
vendored
Normal file
327
third_party/rust/failspot/src/lib.rs
vendored
Normal file
@@ -0,0 +1,327 @@
|
||||
//! A testing library that makes it easy(ish) to add intentional errors to a program
|
||||
//!
|
||||
//! When testing error-handling codepaths, it is often useful to programmatically tell parts of the
|
||||
//! code to fail. This crate provides the [`failspot!()`][failspot] macro, which can be used
|
||||
//! to mark a spot in the codepath where an intentional failure can be toggled on and off
|
||||
//! from testing code.
|
||||
//!
|
||||
//! Adding it to code is fairly simple:
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use {failspot::failspot, std::{error::Error, fs, io, path::Path}};
|
||||
//! # failspot::failspot_name! { pub enum FailSpotName { FailDataRead } }
|
||||
//! # fn main() {
|
||||
//! # read_data_file("/tmp/foo".as_ref());
|
||||
//! # }
|
||||
//! fn read_data_file(path: &Path) -> Result<Vec<u8>, Box<dyn Error>> {
|
||||
//! // `FailDataRead` is a variant of an enum that was declared in our crate
|
||||
//! // `bail` returns `Err` and does type conversion on the error type
|
||||
//! failspot!(FailDataRead bail(io::Error::other("failed due to test config")));
|
||||
//! Ok(fs::read(path)?)
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The [`failspot_name!()`][failspot_name] macro is used to declare an enum that can be used
|
||||
//! to name a failspot. Its syntax is identical to a regular enum declaration:
|
||||
//!
|
||||
//! ```no_run
|
||||
//! # use failspot::failspot_name;
|
||||
//! failspot_name! {
|
||||
//! pub enum FailSpotName {
|
||||
//! StuffWentWrong,
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! # Syntaxes
|
||||
//!
|
||||
//! The [`failspot!()`][failspot] macro has **four main syntaxes**. Each takes either a
|
||||
//! **long form** or a **short form**.
|
||||
//!
|
||||
//! The **long form** explicitly contains the path to the enum containing the failspot names. This
|
||||
//! is useful if there are multiple enums with different failspot names, or if the enum is only
|
||||
//! accessible at a non-standard name or path.
|
||||
//!
|
||||
//! If the enum can be reached at the name `crate::FailSpotName` (either because it was declared in
|
||||
//! the crate root or re-exported there), the **short form** versions of these macros
|
||||
//! can be used. The examples below make this clear:
|
||||
//!
|
||||
//! ## "if-else" syntax (long form)
|
||||
//!
|
||||
//! Useful when standard if-else behavior is desired:
|
||||
//!
|
||||
//! ```
|
||||
//! # use {failspot::failspot};
|
||||
//! # pub mod my_module {
|
||||
//! # failspot::failspot_name! { pub enum MyFailName { FailDataRead } }
|
||||
//! # }
|
||||
//! # fn main() {
|
||||
//! // In "long form", the entire path to the enum must be spelled out
|
||||
//! let _enabled = failspot!(if <crate::my_module::MyFailName>::FailDataRead {
|
||||
//! println!("Data read failure enabled");
|
||||
//! true
|
||||
//! } else {
|
||||
//! println!("Data read failure disabled");
|
||||
//! false
|
||||
//! });
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! When the `enabled` feature is not on, the compiler will see the second block verbatim.
|
||||
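//! For instance (an illustrative expansion, not text from the crate docs), with the feature
//! off the long-form example above behaves as if only the `else` block had been written:
//!
//! ```
//! let _enabled = {
//!     println!("Data read failure disabled");
//!     false
//! };
//! ```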
//!
|
||||
//! ### The same code with the short-form
|
||||
//!
|
||||
//! If an enum named `FailSpotName` is reachable at the crate root, like this:
|
||||
//!
|
||||
//! ```
|
||||
//! # use failspot::failspot_name;
|
||||
//! // lib.rs
|
||||
//! failspot_name! {
|
||||
//! pub enum FailSpotName {
|
||||
//! FailDataRead,
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The short form can be used, like this:
|
||||
//!
|
||||
//! ```
|
||||
//! # use failspot::{failspot, failspot_name};
|
||||
//! # failspot_name! { pub enum FailSpotName { FailDataRead } }
|
||||
//! # fn main() {
|
||||
//! let _enabled = failspot!(if FailDataRead {
|
||||
//! println!("Data read failure enabled");
|
||||
//! true
|
||||
//! } else {
|
||||
//! println!("Data read failure disabled");
|
||||
//! false
|
||||
//! });
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! ## "quick expression" syntax
|
||||
//!
|
||||
//! Useful when a short block of syntax should be evaluated when the failspot is enabled:
|
||||
//!
|
||||
//! ```
|
||||
//! # use {failspot::failspot};
|
||||
//! # failspot::failspot_name! { pub enum FailSpotName { FailDataRead } }
|
||||
//! # fn main() {
|
||||
//! failspot!(FailDataRead println!("Data read failure enabled"));
|
||||
//!
|
||||
//! // Also good for panicking
|
||||
//! failspot!(FailDataRead panic!());
|
||||
//!
|
||||
//! // Or for just early returning
|
||||
//! failspot!(FailDataRead return);
|
||||
//!
|
||||
//! // Multiple statements can be run, but use sparingly as things start to get ugly.
|
||||
//! failspot!(FailDataRead println!("Data read failure enabled"); return);
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! When the `enabled` feature is not on, the macro will evaluate to the empty block, `{}`.
|
||||
//!
|
||||
//! ## "bail" syntax
|
||||
//!
|
||||
//! Useful for returning an `Err` with error-type conversion:
|
||||
//!
|
||||
//! ```
|
||||
//! # use {failspot::failspot, std::error::Error};
|
||||
//! # failspot::failspot_name! { pub enum FailSpotName { FailDataRead } }
|
||||
//! fn main() -> Result<(), Box<dyn Error>> {
|
||||
//! failspot!(FailDataRead bail(std::io::Error::other("Data read failure enabled")));
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! When the `enabled` feature is not on, the macro will evaluate to the empty block, `{}`.
|
||||
//!
|
||||
//! ## "bool" syntax
|
||||
//!
|
||||
//! Useful to just evaluate whether the failspot is enabled or not:
|
||||
//!
|
||||
//! ```
|
||||
//! # use {failspot::failspot, std::error::Error};
|
||||
//! # failspot::failspot_name! { pub enum FailSpotName { FailDataRead } }
|
||||
//! # fn main() {
|
||||
//! # let bytes_to_read = 5;
|
||||
//! let fail_the_read = bytes_to_read > 5000 || failspot!(FailDataRead);
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! When the `enabled` feature is not on, the macro will evaluate to the token `false`.
|
||||
|
||||
#![cfg_attr(
|
||||
feature = "enabled",
|
||||
doc = r#"
|
||||
# Testing code
|
||||
|
||||
Testing code should see the documentation for the [testing] module."#
|
||||
)]
|
||||
#![forbid(missing_docs)]
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
pub mod testing;
|
||||
|
||||
/// Declares a spot that can trigger intentional failures
|
||||
///
|
||||
/// See the [crate-level docs][crate] for details.
|
||||
#[cfg(feature = "enabled")]
|
||||
#[allow(clippy::crate_in_macro_def)]
|
||||
#[macro_export]
|
||||
macro_rules! failspot {
|
||||
(
|
||||
if <$e: ty>::$n: ident {
|
||||
$($enabled: tt)*
|
||||
} $(else {
|
||||
$($disabled: tt)*
|
||||
})?
|
||||
) => {{
|
||||
if $crate::failspot!(<$e>::$n) {
|
||||
$($enabled)*
|
||||
} $(else {
|
||||
$($disabled)*
|
||||
})?
|
||||
}};
|
||||
|
||||
(
|
||||
if $n: ident {
|
||||
$($enabled: tt)*
|
||||
} $(else {
|
||||
$($disabled: tt)*
|
||||
})?
|
||||
) => {
|
||||
$crate::failspot!(
|
||||
if <crate::FailSpotName>::$n {
|
||||
$($enabled)*
|
||||
} $(else {
|
||||
$($disabled)*
|
||||
})?
|
||||
)
|
||||
};
|
||||
|
||||
(<$e: ty>::$n: ident bail($err: expr)) => {{
|
||||
if $crate::failspot!(<$e>::$n) {
|
||||
return Err($err.into());
|
||||
}
|
||||
}};
|
||||
($n: ident bail($err: expr)) => {
|
||||
$crate::failspot!(<crate::FailSpotName>::$n bail($err))
|
||||
};
|
||||
(<$e: ty>::$n: ident) => {{
|
||||
<$e>::enabled(<$e>::$n)
|
||||
}};
|
||||
($n: ident) => {
|
||||
$crate::failspot!(<crate::FailSpotName>::$n)
|
||||
};
|
||||
(<$e: ty>::$n: ident $($enabled: tt)+) => {{
|
||||
if $crate::failspot!(<$e>::$n) {
|
||||
$($enabled)+
|
||||
}
|
||||
}};
|
||||
($n: ident $($enabled: tt)+) => {
|
||||
$crate::failspot!(<crate::FailSpotName>::$n $($enabled)+)
|
||||
}}
|
||||
|
||||
/// Declares a spot that can trigger intentional failures
|
||||
///
|
||||
/// See the [crate-level docs][crate] for details.
|
||||
#[cfg(not(feature = "enabled"))]
|
||||
#[allow(clippy::crate_in_macro_def)]
|
||||
#[macro_export]
|
||||
macro_rules! failspot {
|
||||
(
|
||||
if <$e: ty>::$n: ident {
|
||||
$($enabled: tt)*
|
||||
} $(else {
|
||||
$($disabled: tt)*
|
||||
})?
|
||||
) => {{$($($disabled)*)?}};
|
||||
|
||||
(
|
||||
if $n: ident {
|
||||
$($enabled: tt)*
|
||||
} $(else {
|
||||
$($disabled: tt)*
|
||||
})?
|
||||
) => {
|
||||
$crate::failspot!(
|
||||
if <crate::FailSpotName>::$n {
|
||||
$($enabled)*
|
||||
} $(else {
|
||||
$($disabled)*
|
||||
})?
|
||||
)
|
||||
};
|
||||
(<$e: ty>::$n: ident bail($err: expr)) => {{}};
|
||||
($n: ident bail($err: expr)) => {
|
||||
$crate::failspot!(<crate::FailSpotName>::$n bail($err))
|
||||
};
|
||||
(<$e: ty>::$n: ident) => {false};
|
||||
($n: ident) => {
|
||||
$crate::failspot!(<crate::FailSpotName>::$n)
|
||||
};
|
||||
(<$e: ty>::$n: ident $($enabled: tt)+) => {{}};
|
||||
($n: ident $($enabled: tt)+) => {
|
||||
$crate::failspot!(<crate::FailSpotName>::$n $($enabled)+)
|
||||
}}
|
||||
|
||||
/// Declares an enum that can be used as a name for a [failspot]
|
||||
///
|
||||
/// When feature `enabled` is off, this macro does nothing.
|
||||
///
|
||||
/// See the [crate-level docs][crate] for details.
|
||||
#[cfg(feature = "enabled")]
|
||||
#[macro_export]
|
||||
macro_rules! failspot_name {{
|
||||
$(#[$m:meta])*
|
||||
$p:vis enum $n:ident {
|
||||
$(
|
||||
$(#[$a:meta])*
|
||||
$k:ident
|
||||
),+ $(,)*
|
||||
}
|
||||
} => {
|
||||
$crate::flagset::flags! {
|
||||
$(#[$m])*
|
||||
$p enum $n: usize {
|
||||
$(
|
||||
$(#[$a])*
|
||||
$k
|
||||
),+
|
||||
}
|
||||
}
|
||||
$crate::failspot_global!($n);
|
||||
}}
|
||||
|
||||
/// Declares an enum that can be used as a name for a [failspot]
|
||||
///
|
||||
/// When feature `enabled` is off, this macro does nothing.
|
||||
///
|
||||
/// See the [crate-level docs][crate] for details.
|
||||
#[cfg(not(feature = "enabled"))]
|
||||
#[macro_export]
|
||||
macro_rules! failspot_name(($($t: tt)*) => ());
|
||||
|
||||
#[doc(hidden)]
|
||||
#[macro_export]
|
||||
macro_rules! failspot_global(($n: ident) => {
|
||||
impl $n {
|
||||
pub fn enabled(name: Self) -> bool {
|
||||
Self::global_config().enabled(name)
|
||||
}
|
||||
pub fn testing_client() -> $crate::testing::Client<'static, $n> {
|
||||
Self::global_config().client()
|
||||
}
|
||||
fn global_config() -> &'static $crate::testing::Config<$n> {
|
||||
static GLOBAL: std::sync::LazyLock<$crate::testing::Config<$n>> =
|
||||
std::sync::LazyLock::new($crate::testing::Config::default);
|
||||
&GLOBAL
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[doc(hidden)]
|
||||
pub use flagset;
|
||||
172
third_party/rust/failspot/src/testing.rs
vendored
Normal file
172
third_party/rust/failspot/src/testing.rs
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
//! Allows testing code to enable and disable failspots
|
||||
//!
|
||||
//! When testing a crate, failspots can be enabled through a [Client] object. This can be retrieved
|
||||
//! by using the `fn testing_client() -> Client<'static, Self>` method that exists as part of
|
||||
//! every enum that was declared using the [`failspot_name!()`][crate::failspot_name] macro.
|
||||
//!
|
||||
//! The [`Client::set_enabled()`][Client::set_enabled] method can be used to set or unset a
|
||||
//! failspot. [`Client::reset()`][Client::reset] will unset all failspots.
|
||||
//!
|
||||
//! Example usage:
|
||||
//!
|
||||
//! ```
|
||||
//! # failspot::failspot_name! { pub enum FailSpotName { Name1 } }
|
||||
//! # fn run_tests() {}
|
||||
//! let mut client = FailSpotName::testing_client();
|
||||
//! client.set_enabled(FailSpotName::Name1, true);
|
||||
//! // When the `Client` object drops, all the failspots will be reset to disabled
|
||||
//! // Must ensure it stays alive while tests are running.
|
||||
//! run_tests();
|
||||
//! ```
|
||||
//!
|
||||
//! # Concurrency -- Important!!!
|
||||
//!
|
||||
//! **TL;DR -- Put all your integration tests that use failspot in a separate source file!**
|
||||
//!
|
||||
//! ## The problem
|
||||
//!
|
||||
//! In Rust, **tests are run concurrently by default**. Since the configuration for the failspots
|
||||
//! is a global variable that will be shared by all threads, that would create a problem -- Tests
|
||||
//! that don't use failspots will suddenly start failing because another concurrent test enabled
|
||||
//! them, and tests that do use failspots would clobber each other's configuration.
|
||||
//!
|
||||
//! To prevent this, the [Client] returned by `testing_client()` is **protected by a mutex** --
|
||||
//! Only one test at a time can configure the failspots through the `Client` methods. When the
|
||||
//! client is dropped, all the failspots are reset to disabled state and the mutex is released so
|
||||
//! the next test can start with a fresh state.
|
||||
//!
|
||||
//! This means **every test that may run concurrently with a failspot test must hold the [Client]
|
||||
//! object the entire time the test is running**, even if that test doesn't actually use failspots.
|
||||
//! If there are multiple enums declared with [`failspot_name!()`][crate::failspot_name] then a
|
||||
//! [Client] object for each enum must be held by every test that may run concurrently.
|
||||
//!
|
||||
//! For tests that use failspots, this is intuitive -- Most tests that use failspots will create a
|
||||
//! [Client] as part of their setup.
|
||||
//!
|
||||
//! ## Stopping regular tests from breaking
|
||||
//!
|
||||
//! For tests that don't use failspots, there are 2 choices:
|
||||
//!
|
||||
//! 1. **Put failspot tests in their own source file (recommended)** Integration tests in
|
||||
//! different source files are run in different processes, so separating failspot and non
|
||||
//! failspot tests eliminates the concurrency issue.
|
||||
//!
|
||||
//! 2. **Force tests to run serially** By setting `RUST_TEST_THREADS=1` in the environment, the
|
||||
//! tests will run one-at-a-time and there will be no interference.
|
||||
//!
|
||||
//! Obviously, the first one should be preferred unless there is a good reason not to.
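//!
//! As an illustration of the point above about holding a [Client] in every concurrently
//! running test (a sketch only, not part of this module's docs): a test that never touches
//! failspots can still protect itself by holding the client for its whole duration.
//!
//! ```
//! # failspot::failspot_name! { pub enum FailSpotName { Name1 } }
//! fn unrelated_test_body() {
//!     // Holding the guard keeps concurrent tests from toggling failspots
//!     // until this body finishes and the client drops.
//!     let _guard = FailSpotName::testing_client();
//!     assert_eq!(2 + 2, 4);
//! }
//! # unrelated_test_body();
//! ```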
|
||||
|
||||
use {
|
||||
flagset::FlagSet,
|
||||
std::{
|
||||
ops::{Deref, DerefMut},
|
||||
sync::{Mutex, MutexGuard, RwLock},
|
||||
},
|
||||
};
|
||||
|
||||
/// Config object for an enum declared with [`failspot_name!()`][crate::failspot_name]
|
||||
///
|
||||
/// Every failspot enum has one of these attached. It tracks which failspots are currently
|
||||
/// enabled for that enum, and contains the mutex that ensures that only one [Client] at a time
|
||||
/// is running. It is not normally used directly by user code, but is instead used by the
|
||||
/// [`failspot!()`][crate::failspot] macro for testing failpoints, and by the `testing_client()`
|
||||
/// method to obtain a [Client] for testing code.
|
||||
#[derive(Debug)]
|
||||
pub struct Config<T: flagset::Flags> {
|
||||
inner: RwLock<ConfigInner<T>>,
|
||||
client_mutex: Mutex<()>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
struct ConfigInner<T: flagset::Flags> {
|
||||
enabled_spots: FlagSet<T>,
|
||||
}
|
||||
|
||||
impl<T: flagset::Flags> Default for Config<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inner: Default::default(),
|
||||
client_mutex: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: flagset::Flags> Default for ConfigInner<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled_spots: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: flagset::Flags> Config<T> {
|
||||
/// Returns whether or not the given failspot is enabled
|
||||
pub fn enabled(&self, spot: T) -> bool {
|
||||
self.inner().enabled_spots.contains(spot)
|
||||
}
|
||||
/// Returns a client for this failspot config
|
||||
pub fn client(&self) -> Client<'_, T> {
|
||||
Client::new(self)
|
||||
}
|
||||
fn inner(&self) -> impl Deref<Target = ConfigInner<T>> + '_ {
|
||||
self.inner.read().unwrap()
|
||||
}
|
||||
fn inner_mut(&self) -> impl DerefMut<Target = ConfigInner<T>> + '_ {
|
||||
self.inner.write().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// Client for testing code
|
||||
///
|
||||
/// See [module-level docs][self], especially the part about concurrency.
|
||||
#[derive(Debug)]
|
||||
pub struct Client<'a, T: flagset::Flags> {
|
||||
config: &'a Config<T>,
|
||||
_guard: MutexGuard<'a, ()>,
|
||||
}
|
||||
|
||||
impl<'a, T: flagset::Flags> Client<'a, T> {
|
||||
/// Create a new [Client].
|
||||
///
|
||||
/// Normally not used directly -- Use `EnumName::testing_client()` instead
|
||||
pub fn new(config: &'a Config<T>) -> Self {
|
||||
let _guard = config
|
||||
.client_mutex
|
||||
.lock()
|
||||
.unwrap_or_else(|e| e.into_inner());
|
||||
|
||||
assert_eq!(
|
||||
*config.inner(),
|
||||
ConfigInner::default(),
|
||||
"somehow failed to reset config to default after last client"
|
||||
);
|
||||
|
||||
Client { config, _guard }
|
||||
}
|
||||
/// Set whether the given failspot is enabled or disabled
|
||||
pub fn set_enabled(&mut self, spot: T, enabled: bool) -> &mut Self {
|
||||
if enabled {
|
||||
self.config.inner_mut().enabled_spots |= spot;
|
||||
} else {
|
||||
self.config.inner_mut().enabled_spots -= spot;
|
||||
}
|
||||
self
|
||||
}
|
||||
/// Reset all failspots to disabled
|
||||
pub fn reset(&mut self) -> &mut Self {
|
||||
*self.config.inner_mut() = ConfigInner::default();
|
||||
self
|
||||
}
|
||||
/// Finish with a [Client], resetting all failspots to disabled and releasing the mutex
|
||||
///
|
||||
/// Identical to dropping the [Client], but a bit more explicit about intent.
|
||||
pub fn finish(self) {
|
||||
drop(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: flagset::Flags> Drop for Client<'a, T> {
|
||||
fn drop(&mut self) {
|
||||
self.reset();
|
||||
}
|
||||
}
|
||||
152
third_party/rust/failspot/tests/failspot.rs
vendored
Normal file
152
third_party/rust/failspot/tests/failspot.rs
vendored
Normal file
@@ -0,0 +1,152 @@
|
||||
#![cfg_attr(not(feature = "enabled"), allow(dead_code))]
|
||||
|
||||
failspot::failspot_name! {
|
||||
pub enum FailSpotName {
|
||||
One,
|
||||
Two,
|
||||
Three,
|
||||
Four,
|
||||
}
|
||||
}
|
||||
|
||||
mod inner {
|
||||
failspot::failspot_name! {
|
||||
pub enum FailSpotName2 {
|
||||
Four,
|
||||
Five,
|
||||
Six,
|
||||
Seven,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn stop_process_or_fail() -> Result<(), Box<dyn std::error::Error>> {
|
||||
failspot::failspot!(One bail(std::io::Error::other("fail")));
|
||||
failspot::failspot!(<inner::FailSpotName2>::Four bail(std::io::Error::other("fail")));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn fill_missing_auxv_or_panic() {
|
||||
failspot::failspot!(Two panic!());
|
||||
failspot::failspot!(<inner::FailSpotName2>::Five panic!());
|
||||
}
|
||||
|
||||
fn get_thread_name_or_none() -> Option<String> {
|
||||
if failspot::failspot!(Three) {
|
||||
return None;
|
||||
}
|
||||
if failspot::failspot!(<inner::FailSpotName2>::Six) {
|
||||
return None;
|
||||
}
|
||||
Some("mythread".to_string())
|
||||
}
|
||||
|
||||
fn common() -> Result<Option<String>, Box<dyn std::error::Error>> {
|
||||
stop_process_or_fail()?;
|
||||
fill_missing_auxv_or_panic();
|
||||
let name = get_thread_name_or_none();
|
||||
|
||||
let name = failspot::failspot!(if Four {
|
||||
name.map(|_| "deleted".to_string())
|
||||
} else {
|
||||
name
|
||||
});
|
||||
|
||||
let name = name.map(|name| {
|
||||
failspot::failspot!(if <inner::FailSpotName2>::Seven {
|
||||
"deleted".to_string()
|
||||
} else {
|
||||
name
|
||||
})
|
||||
});
|
||||
|
||||
Ok(name)
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn no_fails() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let _client = FailSpotName::testing_client();
|
||||
let _client2 = inner::FailSpotName2::testing_client();
|
||||
let thread_name = common()?;
|
||||
assert_eq!(thread_name.as_deref(), Some("mythread"));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn panic_fail() {
|
||||
let mut client = FailSpotName::testing_client();
|
||||
let _client2 = inner::FailSpotName2::testing_client();
|
||||
client.set_enabled(FailSpotName::Two, true);
|
||||
let _ = common();
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn panic_fail2() {
|
||||
let mut _client = FailSpotName::testing_client();
|
||||
let mut client2 = inner::FailSpotName2::testing_client();
|
||||
client2.set_enabled(inner::FailSpotName2::Five, true);
|
||||
let _ = common();
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn test_error() {
|
||||
let mut client = FailSpotName::testing_client();
|
||||
let mut _client2 = inner::FailSpotName2::testing_client();
|
||||
client.set_enabled(FailSpotName::One, true);
|
||||
common().unwrap_err();
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn test_error2() {
|
||||
let mut _client = FailSpotName::testing_client();
|
||||
let mut client2 = inner::FailSpotName2::testing_client();
|
||||
client2.set_enabled(inner::FailSpotName2::Four, true);
|
||||
common().unwrap_err();
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn expression_fail() {
|
||||
let mut client = FailSpotName::testing_client();
|
||||
let mut _client2 = inner::FailSpotName2::testing_client();
|
||||
client.set_enabled(FailSpotName::Three, true);
|
||||
let thread_name = common().unwrap();
|
||||
assert!(thread_name.is_none());
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn expression_fail2() {
|
||||
let mut _client = FailSpotName::testing_client();
|
||||
let mut client2 = inner::FailSpotName2::testing_client();
|
||||
client2.set_enabled(inner::FailSpotName2::Six, true);
|
||||
let thread_name = common().unwrap();
|
||||
assert!(thread_name.is_none());
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn if_statement_fail() {
|
||||
let mut client = FailSpotName::testing_client();
|
||||
let mut _client2 = inner::FailSpotName2::testing_client();
|
||||
client.set_enabled(FailSpotName::Four, true);
|
||||
let thread_name = common().unwrap();
|
||||
assert!(thread_name.as_deref() == Some("deleted"));
|
||||
}
|
||||
|
||||
#[cfg(feature = "enabled")]
|
||||
#[test]
|
||||
fn if_statement_fail2() {
|
||||
let mut _client = FailSpotName::testing_client();
|
||||
let mut client2 = inner::FailSpotName2::testing_client();
|
||||
client2.set_enabled(inner::FailSpotName2::Seven, true);
|
||||
let thread_name = common().unwrap();
|
||||
assert!(thread_name.as_deref() == Some("deleted"));
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
{"files":{"Cargo.toml":"6b9b3906662b434710edcd87739806a1b4d1312794f969b50e50705025c9d611","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"9ec2734f45b0d65192b9fee2307e05176b805a19efa994a553dcc5b2d3219a1e","Readme.md":"f1c7d3ed5b9ec8dbc9f87d742b213573d0667ca4b4d148cf1e72681115093e67","src/aarch64/arch.rs":"894d1d66ba487363cdf5f2dd66c20ff6560e3906e7c6ecd80e5fc11a682fa5d9","src/aarch64/cache.rs":"88bfc7ee6d38bd0a0fb5532f29fbcec88fd6a222db0e1a3398e0168d7c05d82c","src/aarch64/dwarf.rs":"800bb304e8d746fb1857c2da5486f278242103ea7eac45b1cdc135b1eb3b92f1","src/aarch64/instruction_analysis/epilogue.rs":"fe45d3fbb92dc7224526fb1e613019ebf36a9f2aa3f8fb52217d20744fbda3ae","src/aarch64/instruction_analysis/mod.rs":"6298b1c5b96a5ac8b1ca39a9764b1b71af8ca4a60b6a435e7404e0b02e700f6a","src/aarch64/instruction_analysis/prologue.rs":"065172ee6e2cb868c76dad0d704f0da15397e94424cb0c5522e4bffcae1b0f19","src/aarch64/macho.rs":"ec88fb0c02707d3d96a41f22bb2f698909af26b41ac9cca6b0244e837e240504","src/aarch64/mod.rs":"a94c4c0b1d3e08bce5b0baf9a6ba1b59f42da2809ce970b8a9050b9c3c46e00a","src/aarch64/pe.rs":"6800dfee18cb8eb96d8802c4a175cfca511d9503a7b6c09d0ce7e84c28d8a1a8","src/aarch64/unwind_rule.rs":"1119387590f16f4582672095f6c9462a94e3d4eaf448baa19c432c5e57fa055d","src/aarch64/unwinder.rs":"1dd24b21a49cf1b2fdcb5fada2afb54b2df269d3560be1e1f0063604593f26f1","src/aarch64/unwindregs.rs":"19e5fd82d62eac135c9075e75c0b031f3037a4b670060b3bc6746ef6d71685f8","src/add_signed.rs":"8c52b1d7c7dfc5cbdd477ff9dcce2e888f663a19e8ef6b89c209c06f7a532203","src/arch.rs":"f7dff12cdc2cf91986a5cb3c8d492f608264bd789841a0cfab1c7042233f0488","src/cache.rs":"90569eba164d72c3d20a0465d05a92bc35ceba38c21b944ced3c759f61be3268","src/code_address.rs":"1e2bd03a5813c0100171c7020dc05d8457e2670c7ef58c0c4e3627bf1d82f1b1","src/display_utils.rs":"2f874fd4778e7e304365d8b65d60dc4a5a8fa5ee2715740dc64e334989a1276d","src/dwarf.rs":"79689d0d16a5ccdb5a6c90d690602d1b9bb0100543c2922b47a4c5715004c581","src/error.rs":"bbcaa2ede65b465bff515e19c50f4a8b76c4fcb481297a50427fd21689121294","src/instruction_analysis.rs":"1023577c008a71805338cd45b8582774dd8c69c7bb349990992733297761743e","src/lib.rs":"f57770c147c5de29b4a3600675b459ce26925ad8c5be19ab0c9545883a7a9320","src/macho.rs":"472cd64d0ef4c4d7b91f3d19307875f61db67de52273fef186da9ede89016982","src/pe.rs":"d50f13dd153d124c3b76df7930295e2310381e7490d45563382775422a858bfe","src/rule_cache.rs":"d764fe5e9202314b77e536a7ebe7cb4d574494eeaeb76d2e7a13ff6b0770cf3b","src/unwind_result.rs":"ec6898d9e66b455978880884199d5570fd220c7d3d1101c6b854b9a2b6cea88d","src/unwind_rule.rs":"3335e0d2af34961ba4eff2d89db6bdde5950909f352539e96852c42b3ca16139","src/unwinder.rs":"1ccd6b02770ed54f8837615cd0da02be75e92da9db304e17a14b6cf8f36dd3e0","src/x86_64/arch.rs":"12ea62c70058eac1c2aa698594cc83fafc5d8ec7205596c4b6f6ff325bd1ed8d","src/x86_64/cache.rs":"57eecbc7a0eea21269ba87e80efd985b13d420b2546722ae1b7c73e2e1731169","src/x86_64/dwarf.rs":"6643cc16ac524c325c02ae3a980dd95da38f660328d7b75c1081454b85e24925","src/x86_64/instruction_analysis/epilogue.rs":"21b98f794ec11d501497904b352017d678ea57a2a1f1617a625b1044de1c79c5","src/x86_64/instruction_analysis/mod.rs":"df9089f73861574607dab07fda68b8c5bf1ff426401840a6c35503bda9996143","src/x86_64/instruction_analysis/prologue.rs":"57f2a9376a70ca708c0d9c85bd324edff8062f73102aa57a9c6319627d8189ad","src/x86_64/macho.rs":"1b8eb6622d36115ac664c54d2a8768cbadd17bdcf252e368cf5ea8a35339d5b9","src/x86_64/mod.rs":"160ad03cce68b6263028fa9eaf460a89fee57795a81adac8bed9c7d4fdf0ebad","src/x86_
64/pe.rs":"25d850fc896e635831c90c1e4073900b32b02fff88858aa579a986aa4052d54e","src/x86_64/register_ordering.rs":"e4e01b5506eaf1d448874e10930035d4a67b8077803e2ceee305d9c5aa88cd2f","src/x86_64/unwind_rule.rs":"f5be036172ac469cbc8248047411cbd43931a1def52cc9fcacce54210abc9824","src/x86_64/unwinder.rs":"2d7228655cc427266e31f1405f44b9e81bb119b9eb0f4abb9a29b39697db2b44","src/x86_64/unwindregs.rs":"63b358fe31b613d456982360ff659927d540b502f9c1a3145c4ba66beb4afdfc"},"package":"0fd28d2036d4fd99e3629487baca659e5af1c5d554e320168613be79028610fc"}
|
||||
{"files":{"Cargo.toml":"9899c522be4b56e17638b19aa8ee37132f5654b32cba8bfe73e3f6929975ffad","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"9ec2734f45b0d65192b9fee2307e05176b805a19efa994a553dcc5b2d3219a1e","Readme.md":"f1c7d3ed5b9ec8dbc9f87d742b213573d0667ca4b4d148cf1e72681115093e67","src/aarch64/arch.rs":"894d1d66ba487363cdf5f2dd66c20ff6560e3906e7c6ecd80e5fc11a682fa5d9","src/aarch64/cache.rs":"88bfc7ee6d38bd0a0fb5532f29fbcec88fd6a222db0e1a3398e0168d7c05d82c","src/aarch64/dwarf.rs":"800bb304e8d746fb1857c2da5486f278242103ea7eac45b1cdc135b1eb3b92f1","src/aarch64/instruction_analysis/epilogue.rs":"fe45d3fbb92dc7224526fb1e613019ebf36a9f2aa3f8fb52217d20744fbda3ae","src/aarch64/instruction_analysis/mod.rs":"6298b1c5b96a5ac8b1ca39a9764b1b71af8ca4a60b6a435e7404e0b02e700f6a","src/aarch64/instruction_analysis/prologue.rs":"065172ee6e2cb868c76dad0d704f0da15397e94424cb0c5522e4bffcae1b0f19","src/aarch64/macho.rs":"ec88fb0c02707d3d96a41f22bb2f698909af26b41ac9cca6b0244e837e240504","src/aarch64/mod.rs":"a94c4c0b1d3e08bce5b0baf9a6ba1b59f42da2809ce970b8a9050b9c3c46e00a","src/aarch64/pe.rs":"6800dfee18cb8eb96d8802c4a175cfca511d9503a7b6c09d0ce7e84c28d8a1a8","src/aarch64/unwind_rule.rs":"1119387590f16f4582672095f6c9462a94e3d4eaf448baa19c432c5e57fa055d","src/aarch64/unwinder.rs":"1dd24b21a49cf1b2fdcb5fada2afb54b2df269d3560be1e1f0063604593f26f1","src/aarch64/unwindregs.rs":"19e5fd82d62eac135c9075e75c0b031f3037a4b670060b3bc6746ef6d71685f8","src/add_signed.rs":"8c52b1d7c7dfc5cbdd477ff9dcce2e888f663a19e8ef6b89c209c06f7a532203","src/arch.rs":"f7dff12cdc2cf91986a5cb3c8d492f608264bd789841a0cfab1c7042233f0488","src/cache.rs":"90569eba164d72c3d20a0465d05a92bc35ceba38c21b944ced3c759f61be3268","src/code_address.rs":"1e2bd03a5813c0100171c7020dc05d8457e2670c7ef58c0c4e3627bf1d82f1b1","src/display_utils.rs":"2f874fd4778e7e304365d8b65d60dc4a5a8fa5ee2715740dc64e334989a1276d","src/dwarf.rs":"79689d0d16a5ccdb5a6c90d690602d1b9bb0100543c2922b47a4c5715004c581","src/error.rs":"bbcaa2ede65b465bff515e19c50f4a8b76c4fcb481297a50427fd21689121294","src/instruction_analysis.rs":"1023577c008a71805338cd45b8582774dd8c69c7bb349990992733297761743e","src/lib.rs":"f57770c147c5de29b4a3600675b459ce26925ad8c5be19ab0c9545883a7a9320","src/macho.rs":"472cd64d0ef4c4d7b91f3d19307875f61db67de52273fef186da9ede89016982","src/pe.rs":"d50f13dd153d124c3b76df7930295e2310381e7490d45563382775422a858bfe","src/rule_cache.rs":"d764fe5e9202314b77e536a7ebe7cb4d574494eeaeb76d2e7a13ff6b0770cf3b","src/unwind_result.rs":"ec6898d9e66b455978880884199d5570fd220c7d3d1101c6b854b9a2b6cea88d","src/unwind_rule.rs":"3335e0d2af34961ba4eff2d89db6bdde5950909f352539e96852c42b3ca16139","src/unwinder.rs":"72874a341338b3d536b1ab4602cffe8d05fd7f59063d2f55985a5d3843203ef7","src/x86_64/arch.rs":"12ea62c70058eac1c2aa698594cc83fafc5d8ec7205596c4b6f6ff325bd1ed8d","src/x86_64/cache.rs":"57eecbc7a0eea21269ba87e80efd985b13d420b2546722ae1b7c73e2e1731169","src/x86_64/dwarf.rs":"6643cc16ac524c325c02ae3a980dd95da38f660328d7b75c1081454b85e24925","src/x86_64/instruction_analysis/epilogue.rs":"21b98f794ec11d501497904b352017d678ea57a2a1f1617a625b1044de1c79c5","src/x86_64/instruction_analysis/mod.rs":"df9089f73861574607dab07fda68b8c5bf1ff426401840a6c35503bda9996143","src/x86_64/instruction_analysis/prologue.rs":"57f2a9376a70ca708c0d9c85bd324edff8062f73102aa57a9c6319627d8189ad","src/x86_64/macho.rs":"1b8eb6622d36115ac664c54d2a8768cbadd17bdcf252e368cf5ea8a35339d5b9","src/x86_64/mod.rs":"160ad03cce68b6263028fa9eaf460a89fee57795a81adac8bed9c7d4fdf0ebad","src/x86_
64/pe.rs":"25d850fc896e635831c90c1e4073900b32b02fff88858aa579a986aa4052d54e","src/x86_64/register_ordering.rs":"e4e01b5506eaf1d448874e10930035d4a67b8077803e2ceee305d9c5aa88cd2f","src/x86_64/unwind_rule.rs":"f5be036172ac469cbc8248047411cbd43931a1def52cc9fcacce54210abc9824","src/x86_64/unwinder.rs":"2d7228655cc427266e31f1405f44b9e81bb119b9eb0f4abb9a29b39697db2b44","src/x86_64/unwindregs.rs":"63b358fe31b613d456982360ff659927d540b502f9c1a3145c4ba66beb4afdfc"},"package":"33e8ad8f843eb89b4ec8270be4d5840dc6b81ca1a1c1e036b17e94076f36eed4"}
|
||||
10
third_party/rust/framehop/Cargo.toml
vendored
10
third_party/rust/framehop/Cargo.toml
vendored
@@ -12,7 +12,7 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "framehop"
|
||||
version = "0.12.1"
|
||||
version = "0.13.0"
|
||||
authors = ["Markus Stange <mstange.moz@gmail.com>"]
|
||||
exclude = [
|
||||
"/.github",
|
||||
@@ -48,7 +48,7 @@ version = "1.0.0"
|
||||
version = "0.3.0"
|
||||
|
||||
[dependencies.gimli]
|
||||
version = "0.30"
|
||||
version = "0.31"
|
||||
features = ["read"]
|
||||
default-features = false
|
||||
|
||||
@@ -56,12 +56,6 @@ default-features = false
|
||||
version = "0.4.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.object]
|
||||
version = "0.36"
|
||||
features = ["read_core"]
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[dependencies.pe-unwind-info]
|
||||
version = "0.2.1"
|
||||
optional = true
|
||||
|
||||
45
third_party/rust/framehop/src/unwinder.rs
vendored
45
third_party/rust/framehop/src/unwinder.rs
vendored
@@ -936,51 +936,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "object")]
|
||||
mod object {
|
||||
use super::{ModuleSectionInfo, Range};
|
||||
use object::read::{Object, ObjectSection, ObjectSegment};
|
||||
|
||||
impl<'data: 'file, 'file, O, D> ModuleSectionInfo<D> for &'file O
|
||||
where
|
||||
O: Object<'data>,
|
||||
D: From<&'data [u8]>,
|
||||
{
|
||||
fn base_svma(&self) -> u64 {
|
||||
if let Some(text_segment) = self.segments().find(|s| s.name() == Ok(Some("__TEXT"))) {
|
||||
// This is a mach-O image. "Relative addresses" are relative to the
|
||||
// vmaddr of the __TEXT segment.
|
||||
return text_segment.address();
|
||||
}
|
||||
|
||||
// For PE binaries, relative_address_base() returns the image base address.
|
||||
// Otherwise it returns zero. This gives regular ELF images a base address of zero,
|
||||
// which is what we want.
|
||||
self.relative_address_base()
|
||||
}
|
||||
|
||||
fn section_svma_range(&mut self, name: &[u8]) -> Option<Range<u64>> {
|
||||
let section = self.section_by_name_bytes(name)?;
|
||||
Some(section.address()..section.address() + section.size())
|
||||
}
|
||||
|
||||
fn section_data(&mut self, name: &[u8]) -> Option<D> {
|
||||
let section = self.section_by_name_bytes(name)?;
|
||||
section.data().ok().map(|data| data.into())
|
||||
}
|
||||
|
||||
fn segment_svma_range(&mut self, name: &[u8]) -> Option<Range<u64>> {
|
||||
let segment = self.segments().find(|s| s.name_bytes() == Ok(Some(name)))?;
|
||||
Some(segment.address()..segment.address() + segment.size())
|
||||
}
|
||||
|
||||
fn segment_data(&mut self, name: &[u8]) -> Option<D> {
|
||||
let segment = self.segments().find(|s| s.name_bytes() == Ok(Some(name)))?;
|
||||
segment.data().ok().map(|data| data.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<D: Deref<Target = [u8]>> Module<D> {
|
||||
pub fn new(
|
||||
name: String,
|
||||
|
||||
2
third_party/rust/gimli/.cargo-checksum.json
vendored
2
third_party/rust/gimli/.cargo-checksum.json
vendored
@@ -1 +1 @@
|
||||
{"files":{"CHANGELOG.md":"39644968fcea2bf6cf14f94047cc8b5e9785797631c0cd8033e4e2cdbcf27969","Cargo.toml":"1ecca3db954f8885686c1e3ca6b7222d500bc26926a46438eabd519569109c32","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"6b55025491f62ca7dd19a7a6cdb9154b06db33c85247c88a19804ab1c1ba2b5e","src/arch.rs":"735a8e871479263ad2dd86c38cc68167da320800ca70b8b1d25a224e5f3d0bd8","src/common.rs":"92bb5bc1eebe0a1906389a75096288773bb86b8895b827851bfb082a3c4999f8","src/constants.rs":"33b74f752fc11aefa1f5ef36a08c2fac19453e6d16f8e490d15b2eabcd63a55c","src/endianity.rs":"1f7e62ae34f540c06bedf1e7948739211556eea7dd83731a5ca52c7d687ed0fc","src/leb128.rs":"996d5c79d027f97c010ca487bc4ff5f8265f4b9e63d62b4e4fa291383c259ee9","src/lib.rs":"538a8080f33a0641f831e883085425c36cbce2ae39e0cd5e0b6c7c062bca7712","src/read/abbrev.rs":"f937e45d151ac5073f2c526b792e86e5ba96d3d36cb0377682a596c272be589a","src/read/addr.rs":"a6a535b690793e4c8ec85127d558e796cb8f6272533cd0418886bbc44289039e","src/read/aranges.rs":"fd3ff965cfd23c8b425c555f8f34a190764ae993433f32c63f9452c6604806cd","src/read/cfi.rs":"93e7572e44d97d10977833cedab78d68b6f0fec643edda4a613ad8ae845a93ce","src/read/dwarf.rs":"0f30d814dfe067aa6fbd0b80dac8e1a2532e2c5cd5e584c151a8915356b6b2d7","src/read/endian_reader.rs":"25752b609d74ad7dc85df84d044d0e931024a95af72a760cd51f834016775b3e","src/read/endian_slice.rs":"5b44661714967780b8c9f52fdaf655a53e309c38cbd3daf11bf6b1d5f6d067bb","src/read/index.rs":"2a28d032bc3bc5235545ac526b367512ac0aa7807909b6c02c8d3f84f5beff87","src/read/line.rs":"463fedce39895af793cdce413d9593cfd3470939f9f944fd7814ded5946d5b7e","src/read/lists.rs":"67ca9e1a36a91feb4996d035211de845205212bfda02163685d217818567ff93","src/read/loclists.rs":"a05933e752d44c1d26e83c321dbc1b8a3616b1d76ad15f488858f7f74fd3aece","src/read/lookup.rs":"0cf89ba12b9d48b1fe035dd3a497730323acb9427a9457abbc2f7c58c4c71165","src/read/mod.rs":"1154168832c544acd31f467668fb86536232138c84e5918ba3b1cc66d1554d05","src/read/op.rs":"8782f09332eea1a218aa524a67c9c1cc2e73a8210b30402519dbe8fcf21dcf6e","src/read/pubnames.rs":"ed752ee1a7017e6d3be42d81e4ddaaac960ef08081463a19106c9f041526d4a3","src/read/pubtypes.rs":"5e75b32c0923e827aff0bb2db456797a0e8d38ba46be992558a7990b3196bcf5","src/read/reader.rs":"afc9c2cfbfe0fce5b1825d029f8e841100f48b04b86181950a213fbb82e6ad63","src/read/relocate.rs":"6844b113eb8218152e29912accc54b26bc2498e97bfe4af824472ddb69b8601c","src/read/rnglists.rs":"d1afeb1779d145493a1fc665fa32820c63c539e40b10ecd5b5f343836da188e6","src/read/str.rs":"4dd98cc8d93ce6f06c194eae034bfe0a3d45a9f06fbeaca38d8f29a9c7cf15a5","src/read/unit.rs":"bcff85e55148bf141984a4cb20eb5983cfd85de6e8a4535cef2ab19e8e0f5103","src/read/util.rs":"61e41212f1c8336988c9a7a1523c2913af8c8a66d2dd59d3631ba179e801e3bd","src/read/value.rs":"1c0db3759c65ffda3520fcecd36118367dfb46845035d5d97fcba2f0ea780380","src/test_util.rs":"291eefa6b51c6d934ba2f4a4c9bc7c403046fc1cccf4d43487820f0154bb89e2","src/write/abbrev.rs":"fa02163389e92e804d139cf84f833ab6af932083f0eb2d74464b4a70bd3237ff","src/write/cfi.rs":"323ab703251a41fe83172d749c8afec7d869c5d52e8edd85d7b87450102e6e3a","src/write/dwarf.rs":"8a1a0893e31134ad68993994594f3024ad0c8af7c1188b29e0ffc26b42edef21","src/write/endian_vec.rs":"1d5811986648816a677580b22630f5059757a381487d73e9adbb3008c9ae0c58","src/write/line.rs":"80f7626f15467d69fb73a9d9fda7863fe343f236d5fcdbc353bdf2a2a4b1bb42","src/write/loc.rs":"2a58b0f57ab344f23de81e459f6fefa153e29e0384af31bbcbc80095af0fa703","s
rc/write/mod.rs":"6e43a028baf73bf50ee276a3f08f31adc69cacdde25d56b55f14c0d48ca6f3aa","src/write/op.rs":"e599fa116366f273ca33da3428132f2b9da21c0cc50a0c0ccfd0f524ccb4e82e","src/write/range.rs":"28033849e7912f60d137c2f2e0065c5169a7f16896b179178c8e3674d7c2785e","src/write/relocate.rs":"117b97eae3ca2aad9d5b242652ebbdb333440e877be37873a7ef5ba1a39ced43","src/write/section.rs":"126a0202d606ea94d5b7ee4853afefb05f2546710210954fd0cc18af8674a511","src/write/str.rs":"4850cc2fee55980f9cbb6b4169f9861ab9d05c2b28a85c2b790480b83a66f514","src/write/unit.rs":"35419f917bd759ab026c9701ac0aef9a945ffb95a10f1c9c72608020206edf44","src/write/writer.rs":"7d5dd07b82ec3becebb060c106d4ea697cbd8b9b64a5de78403511a5244e08b1"},"package":"e2e1d97fbe9722ba9bbd0c97051c2956e726562b61f86a25a4360398a40edfc9"}
|
||||
{"files":{"CHANGELOG.md":"3947775ed524a59bfbe1e9bb695ff03629da07f12e8a15df600d2a79a8a5f8fa","Cargo.toml":"a59608f272a0c354fd03374fa94556d7858281e516a4bea82c05e42e15eb7c81","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"004c3170ee488dd64b91927a99024a884463d0dfebf98bdb9a3526d08895e0c0","src/arch.rs":"735a8e871479263ad2dd86c38cc68167da320800ca70b8b1d25a224e5f3d0bd8","src/common.rs":"f3ba70eaf7f9a5978bdcbdf20fab5f7c2bfa5d4816e765d59f064500231938da","src/constants.rs":"d28ab78922d0cb49b1cd0897b4fa46e337f3af99492f9715349807ac962c3147","src/endianity.rs":"1f7e62ae34f540c06bedf1e7948739211556eea7dd83731a5ca52c7d687ed0fc","src/leb128.rs":"81a0ca0e8ba56e749af4588fe76179611234062f79be1e9fb8ea8a27516b372f","src/lib.rs":"7c2640854e32a8b974f4e577b4a30e29f3d682e72203ce1d2a657e87752477ff","src/read/abbrev.rs":"f937e45d151ac5073f2c526b792e86e5ba96d3d36cb0377682a596c272be589a","src/read/addr.rs":"a6a535b690793e4c8ec85127d558e796cb8f6272533cd0418886bbc44289039e","src/read/aranges.rs":"464bd2deb2b7510a53c9068cc236c588aef3cad640868a4cae6fed6a0303c6af","src/read/cfi.rs":"298066749e66065b451f4ad9bbec9d959211fb2a90b0d93bb39297b1eec5f6cd","src/read/dwarf.rs":"708b821ec5c8e73530ef5b5adec092296b7e78b9187958336154840ed275fbd2","src/read/endian_reader.rs":"6e7bf5b26719b1a5e396159387bb10666b218c10e603a4552882ec9d4d5cb8be","src/read/endian_slice.rs":"5b44661714967780b8c9f52fdaf655a53e309c38cbd3daf11bf6b1d5f6d067bb","src/read/index.rs":"42c9aae82e87cf7451d4434f68cd5d84e6bb482e51742af213a711b053633085","src/read/line.rs":"46c8a779d19533e7aac4edb035ab86839a5173e3418b37d45c0a2a6bfeb6d5fc","src/read/lists.rs":"5a4e33038ceedb48a8332a7713ee67a1be9b7a4031881a2ccb9b9259ba29356b","src/read/loclists.rs":"8276b94599cc2fdbf25cee1cfd13236e1b00c12897268cd164b58f2515dab52b","src/read/lookup.rs":"0cf89ba12b9d48b1fe035dd3a497730323acb9427a9457abbc2f7c58c4c71165","src/read/mod.rs":"89b718ead26a4bda49d6185dee081c9b42a8839397fb3337c9a0ffa18105f5b5","src/read/op.rs":"0a24a7ab1de6e9799da6cce4b838c2264f88c3fd8936fd0601e3a7ac44c5a72d","src/read/pubnames.rs":"ed752ee1a7017e6d3be42d81e4ddaaac960ef08081463a19106c9f041526d4a3","src/read/pubtypes.rs":"5e75b32c0923e827aff0bb2db456797a0e8d38ba46be992558a7990b3196bcf5","src/read/reader.rs":"502b303d33e2cb93fba54c97b487bf3f2216d645ab1c6eae8f4cad9c2e8f27e2","src/read/relocate.rs":"6844b113eb8218152e29912accc54b26bc2498e97bfe4af824472ddb69b8601c","src/read/rnglists.rs":"2f4259326078b5dd7e2643265cd3c9bcfb74f938188c5adac96c8ae0ea2a99bf","src/read/str.rs":"4dd98cc8d93ce6f06c194eae034bfe0a3d45a9f06fbeaca38d8f29a9c7cf15a5","src/read/unit.rs":"f2fc760be1337a6a0351e9921e6e2e6290b17b6cfada6db96e0ebb3fdee68c90","src/read/util.rs":"61e41212f1c8336988c9a7a1523c2913af8c8a66d2dd59d3631ba179e801e3bd","src/read/value.rs":"1c0db3759c65ffda3520fcecd36118367dfb46845035d5d97fcba2f0ea780380","src/test_util.rs":"291eefa6b51c6d934ba2f4a4c9bc7c403046fc1cccf4d43487820f0154bb89e2","src/write/abbrev.rs":"fa02163389e92e804d139cf84f833ab6af932083f0eb2d74464b4a70bd3237ff","src/write/cfi.rs":"22e93c21552d8d6329f5d9ea1c515fbe3820a23e89fff1b38d9478fa2bbab723","src/write/dwarf.rs":"8a1a0893e31134ad68993994594f3024ad0c8af7c1188b29e0ffc26b42edef21","src/write/endian_vec.rs":"1d5811986648816a677580b22630f5059757a381487d73e9adbb3008c9ae0c58","src/write/line.rs":"a8686164701dc2b19954a1f17da635c6f162fe9eec3fa66b2a368ae1940e6703","src/write/loc.rs":"2a58b0f57ab344f23de81e459f6fefa153e29e0384af31bbcbc80095af0fa703","s
rc/write/mod.rs":"7ebf0af9d4b558e1c24c7e6b84ae9c75b7e75c83d09a03ef8608973052d31913","src/write/op.rs":"e599fa116366f273ca33da3428132f2b9da21c0cc50a0c0ccfd0f524ccb4e82e","src/write/range.rs":"28033849e7912f60d137c2f2e0065c5169a7f16896b179178c8e3674d7c2785e","src/write/relocate.rs":"117b97eae3ca2aad9d5b242652ebbdb333440e877be37873a7ef5ba1a39ced43","src/write/section.rs":"126a0202d606ea94d5b7ee4853afefb05f2546710210954fd0cc18af8674a511","src/write/str.rs":"4850cc2fee55980f9cbb6b4169f9861ab9d05c2b28a85c2b790480b83a66f514","src/write/unit.rs":"e0552266b0b39b74ab4676ca5cebed333fc34e9fb0b735f3509943058926f7e3","src/write/writer.rs":"c7696a3c2cff032ad6ada696132e4bbef92c4af76c7370c9ce82dedf2cf3716b"},"package":"32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64"}
|
||||
54
third_party/rust/gimli/CHANGELOG.md
vendored
54
third_party/rust/gimli/CHANGELOG.md
vendored
@@ -2,6 +2,60 @@
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.31.0
|
||||
|
||||
Released 2024/07/16.
|
||||
|
||||
### Breaking changes
|
||||
|
||||
* Deleted support for segment selectors.
|
||||
[#720](https://github.com/gimli-rs/gimli/pull/720)
|
||||
|
||||
* Added `read::FileEntry::source` and deleted `Copy` implementation.
|
||||
[#728](https://github.com/gimli-rs/gimli/pull/728)
|
||||
|
||||
* Changed `read::LineRow::execute` to return a `Result`.
|
||||
[#731](https://github.com/gimli-rs/gimli/pull/731)
|
||||
|
||||
* Deleted `Display` implementation for `read::LineInstruction`.
|
||||
[#734](https://github.com/gimli-rs/gimli/pull/734)
|
||||
|
||||
* Changed `read::Error` to be non-exhaustive.
|
||||
|
||||
### Changed
|
||||
|
||||
* Fixed `Hash` implementation for `read::EndianReader`.
|
||||
[#723](https://github.com/gimli-rs/gimli/pull/723)
|
||||
|
||||
* Changed `read::EhFrameHdr::parse` to validate the FDE count encoding.
|
||||
[#725](https://github.com/gimli-rs/gimli/pull/725)
|
||||
|
||||
* Changed address overflow to be an error for `read::UnwindTableRow`,
|
||||
`read::LineRow`, and `read::ArangeEntry`.
|
||||
[#730](https://github.com/gimli-rs/gimli/pull/730)
|
||||
[#731](https://github.com/gimli-rs/gimli/pull/731)
|
||||
[#732](https://github.com/gimli-rs/gimli/pull/732)
|
||||
|
||||
* Changed wrapping addition for 32-bit addresses to wrap at 32 bits instead of
|
||||
at 64 bits.
|
||||
[#733](https://github.com/gimli-rs/gimli/pull/733)
|
||||
|
||||
* Added earlier validation of address sizes.
|
||||
[#733](https://github.com/gimli-rs/gimli/pull/733)
|
||||
|
||||
### Added
|
||||
|
||||
* Added `read::IndexSectionId::section_id`.
|
||||
[#719](https://github.com/gimli-rs/gimli/pull/719)
|
||||
|
||||
* Added `read::FrameDescriptionEntry::end_address`.
|
||||
[#727](https://github.com/gimli-rs/gimli/pull/727)
|
||||
|
||||
* Added support for `DW_LNCT_LLVM_source`.
|
||||
[#728](https://github.com/gimli-rs/gimli/pull/728)
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
## 0.30.0
|
||||
|
||||
Released 2024/05/26.
|
||||
|
||||
4
third_party/rust/gimli/Cargo.toml
vendored
4
third_party/rust/gimli/Cargo.toml
vendored
@@ -13,7 +13,7 @@
|
||||
edition = "2018"
|
||||
rust-version = "1.60"
|
||||
name = "gimli"
|
||||
version = "0.30.0"
|
||||
version = "0.31.0"
|
||||
include = [
|
||||
"/CHANGELOG.md",
|
||||
"/Cargo.toml",
|
||||
@@ -24,7 +24,7 @@ include = [
|
||||
]
|
||||
description = "A library for reading and writing the DWARF debugging format."
|
||||
documentation = "https://docs.rs/gimli"
|
||||
readme = "./README.md"
|
||||
readme = "README.md"
|
||||
keywords = [
|
||||
"DWARF",
|
||||
"debug",
|
||||
|
||||
2
third_party/rust/gimli/README.md
vendored
2
third_party/rust/gimli/README.md
vendored
@@ -30,7 +30,7 @@ Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
gimli = "0.30.0"
|
||||
gimli = "0.31.0"
|
||||
```
|
||||
|
||||
The minimum supported Rust version is:
|
||||
|
||||
2
third_party/rust/gimli/src/common.rs
vendored
2
third_party/rust/gimli/src/common.rs
vendored
@@ -48,8 +48,6 @@ pub struct Encoding {
|
||||
/// The size of an address.
|
||||
pub address_size: u8,
|
||||
|
||||
// The size of a segment selector.
|
||||
// TODO: pub segment_size: u8,
|
||||
/// Whether the DWARF format is 32- or 64-bit.
|
||||
pub format: Format,
|
||||
|
||||
|
||||
3
third_party/rust/gimli/src/constants.rs
vendored
3
third_party/rust/gimli/src/constants.rs
vendored
@@ -1052,7 +1052,10 @@ DwLnct(u16) {
|
||||
DW_LNCT_timestamp = 0x3,
|
||||
DW_LNCT_size = 0x4,
|
||||
DW_LNCT_MD5 = 0x5,
|
||||
// DW_LNCT_source = 0x6,
|
||||
DW_LNCT_lo_user = 0x2000,
|
||||
// We currently only implement the LLVM embedded source code extension for DWARF v5.
|
||||
DW_LNCT_LLVM_source = 0x2001,
|
||||
DW_LNCT_hi_user = 0x3fff,
|
||||
});
|
||||
|
||||
|
||||
4
third_party/rust/gimli/src/leb128.rs
vendored
4
third_party/rust/gimli/src/leb128.rs
vendored
@@ -56,7 +56,7 @@ fn low_bits_of_byte(byte: u8) -> u8 {
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
fn low_bits_of_u64(val: u64) -> u8 {
|
||||
let byte = val & u64::from(core::u8::MAX);
|
||||
let byte = val & u64::from(u8::MAX);
|
||||
low_bits_of_byte(byte as u8)
|
||||
}
|
||||
|
||||
@@ -465,7 +465,7 @@ mod tests {
|
||||
for i in -513..513 {
|
||||
inner(i);
|
||||
}
|
||||
inner(core::i64::MIN);
|
||||
inner(i64::MIN);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
193
third_party/rust/gimli/src/read/aranges.rs
vendored
193
third_party/rust/gimli/src/read/aranges.rs
vendored
@@ -1,6 +1,8 @@
|
||||
use crate::common::{DebugArangesOffset, DebugInfoOffset, Encoding, SectionId};
|
||||
use crate::endianity::Endianity;
|
||||
use crate::read::{EndianSlice, Error, Range, Reader, ReaderOffset, Result, Section};
|
||||
use crate::read::{
|
||||
EndianSlice, Error, Range, Reader, ReaderAddress, ReaderOffset, Result, Section,
|
||||
};
|
||||
|
||||
/// The `DebugAranges` struct represents the DWARF address range information
|
||||
/// found in the `.debug_aranges` section.
|
||||
@@ -135,7 +137,6 @@ where
|
||||
encoding: Encoding,
|
||||
length: Offset,
|
||||
debug_info_offset: DebugInfoOffset<Offset>,
|
||||
segment_size: u8,
|
||||
entries: R,
|
||||
}
|
||||
|
||||
@@ -158,21 +159,22 @@ where
|
||||
}
|
||||
|
||||
let debug_info_offset = rest.read_offset(format).map(DebugInfoOffset)?;
|
||||
let address_size = rest.read_u8()?;
|
||||
let address_size = rest.read_address_size()?;
|
||||
let segment_size = rest.read_u8()?;
|
||||
if segment_size != 0 {
|
||||
return Err(Error::UnsupportedSegmentSize);
|
||||
}
|
||||
|
||||
// unit_length + version + offset + address_size + segment_size
|
||||
let header_length = format.initial_length_size() + 2 + format.word_size() + 1 + 1;
|
||||
|
||||
// The first tuple following the header in each set begins at an offset that is
|
||||
// a multiple of the size of a single tuple (that is, the size of a segment selector
|
||||
// plus twice the size of an address).
|
||||
// a multiple of the size of a single tuple (that is, twice the size of an address).
|
||||
let tuple_length = address_size
|
||||
.checked_mul(2)
|
||||
.and_then(|x| x.checked_add(segment_size))
|
||||
.ok_or(Error::InvalidAddressRange)?;
|
||||
.ok_or(Error::UnsupportedAddressSize(address_size))?;
|
||||
if tuple_length == 0 {
|
||||
return Err(Error::InvalidAddressRange);
|
||||
return Err(Error::UnsupportedAddressSize(address_size));
|
||||
}
|
||||
let padding = if header_length % tuple_length == 0 {
|
||||
0
|
||||
@@ -185,14 +187,12 @@ where
|
||||
format,
|
||||
version,
|
||||
address_size,
|
||||
// TODO: segment_size
|
||||
};
|
||||
Ok(ArangeHeader {
|
||||
offset,
|
||||
encoding,
|
||||
length,
|
||||
debug_info_offset,
|
||||
segment_size,
|
||||
entries: rest,
|
||||
})
|
||||
}
|
||||
@@ -215,12 +215,6 @@ where
|
||||
self.encoding
|
||||
}
|
||||
|
||||
/// Return the segment size for this set of entries.
|
||||
#[inline]
|
||||
pub fn segment_size(&self) -> u8 {
|
||||
self.segment_size
|
||||
}
|
||||
|
||||
/// Return the offset into the .debug_info section for this set of arange entries.
|
||||
#[inline]
|
||||
pub fn debug_info_offset(&self) -> DebugInfoOffset<Offset> {
|
||||
@@ -233,7 +227,6 @@ where
|
||||
ArangeEntryIter {
|
||||
input: self.entries.clone(),
|
||||
encoding: self.encoding,
|
||||
segment_size: self.segment_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -246,7 +239,6 @@ where
|
||||
pub struct ArangeEntryIter<R: Reader> {
|
||||
input: R,
|
||||
encoding: Encoding,
|
||||
segment_size: u8,
|
||||
}
|
||||
|
||||
impl<R: Reader> ArangeEntryIter<R> {
|
||||
@@ -261,7 +253,7 @@ impl<R: Reader> ArangeEntryIter<R> {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
match ArangeEntry::parse(&mut self.input, self.encoding, self.segment_size) {
|
||||
match ArangeEntry::parse(&mut self.input, self.encoding) {
|
||||
Ok(Some(entry)) => Ok(Some(entry)),
|
||||
Ok(None) => {
|
||||
self.input.empty();
|
||||
@@ -288,61 +280,40 @@ impl<R: Reader> fallible_iterator::FallibleIterator for ArangeEntryIter<R> {
|
||||
/// A single parsed arange.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub struct ArangeEntry {
|
||||
segment: Option<u64>,
|
||||
address: u64,
|
||||
range: Range,
|
||||
length: u64,
|
||||
}
|
||||
|
||||
impl ArangeEntry {
|
||||
/// Parse a single arange. Return `None` for the null arange, `Some` for an actual arange.
|
||||
fn parse<R: Reader>(
|
||||
input: &mut R,
|
||||
encoding: Encoding,
|
||||
segment_size: u8,
|
||||
) -> Result<Option<Self>> {
|
||||
fn parse<R: Reader>(input: &mut R, encoding: Encoding) -> Result<Option<Self>> {
|
||||
let address_size = encoding.address_size;
|
||||
|
||||
let tuple_length = R::Offset::from_u8(2 * address_size + segment_size);
|
||||
let tuple_length = R::Offset::from_u8(2 * address_size);
|
||||
if tuple_length > input.len() {
|
||||
input.empty();
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let segment = if segment_size != 0 {
|
||||
input.read_address(segment_size)?
|
||||
} else {
|
||||
0
|
||||
};
|
||||
let address = input.read_address(address_size)?;
|
||||
let begin = input.read_address(address_size)?;
|
||||
let length = input.read_address(address_size)?;
|
||||
// Calculate end now so that we can handle overflow.
|
||||
let end = begin.add_sized(length, address_size)?;
|
||||
let range = Range { begin, end };
|
||||
|
||||
match (segment, address, length) {
|
||||
match (begin, length) {
|
||||
// This is meant to be a null terminator, but in practice it can occur
|
||||
// before the end, possibly due to a linker omitting a function and
|
||||
// leaving an unrelocated entry.
|
||||
(0, 0, 0) => Self::parse(input, encoding, segment_size),
|
||||
_ => Ok(Some(ArangeEntry {
|
||||
segment: if segment_size != 0 {
|
||||
Some(segment)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
address,
|
||||
length,
|
||||
})),
|
||||
(0, 0) => Self::parse(input, encoding),
|
||||
_ => Ok(Some(ArangeEntry { range, length })),
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the segment selector of this arange.
|
||||
#[inline]
|
||||
pub fn segment(&self) -> Option<u64> {
|
||||
self.segment
|
||||
}
|
||||
|
||||
/// Return the beginning address of this arange.
|
||||
#[inline]
|
||||
pub fn address(&self) -> u64 {
|
||||
self.address
|
||||
self.range.begin
|
||||
}
|
||||
|
||||
/// Return the length of this arange.
|
||||
@@ -354,10 +325,7 @@ impl ArangeEntry {
|
||||
/// Return the range.
|
||||
#[inline]
|
||||
pub fn range(&self) -> Range {
|
||||
Range {
|
||||
begin: self.address,
|
||||
end: self.address.wrapping_add(self.length),
|
||||
}
|
||||
self.range
|
||||
}
|
||||
}
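The two comments above capture the behavioural changes in `ArangeEntry::parse`: a (0, 0) tuple that appears before the real terminator is skipped rather than ending the table, and `end` is now computed eagerly so that an overflowing `begin + length` is reported instead of wrapped. A minimal standalone sketch of those rules (illustrative code under those assumptions, not the vendored gimli implementation):

fn parse_tuple(
    begin: u64,
    length: u64,
    address_size: u8,
) -> Result<Option<(u64, u64)>, &'static str> {
    if begin == 0 && length == 0 {
        // A (0, 0) tuple mid-table is skipped; the caller reads the next tuple.
        return Ok(None);
    }
    // Largest address representable with this address size.
    let max = !0u64 >> (64 - u64::from(address_size) * 8);
    let end = begin
        .checked_add(length)
        .filter(|end| *end <= max)
        .ok_or("address overflow")?;
    Ok(Some((begin, end)))
}

fn main() {
    assert_eq!(parse_tuple(0, 0, 4), Ok(None));
    assert_eq!(
        parse_tuple(0x0403_0201, 0x0807_0605, 8),
        Ok(Some((0x0403_0201, 0x0C0A_0806)))
    );
    assert_eq!(parse_tuple(0x8403_0201, 0x8807_0605, 4), Err("address overflow"));
}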
|
||||
|
||||
@@ -426,8 +394,8 @@ mod tests {
|
||||
fn test_parse_header_ok() {
|
||||
#[rustfmt::skip]
|
||||
let buf = [
|
||||
// 32-bit length = 32.
|
||||
0x20, 0x00, 0x00, 0x00,
|
||||
// 32-bit length = 28 (8 bytes header, 4 bytes padding, 16 bytes tuple data).
|
||||
0x1c, 0x00, 0x00, 0x00,
|
||||
// Version.
|
||||
0x02, 0x00,
|
||||
// Offset.
|
||||
@@ -435,11 +403,10 @@ mod tests {
|
||||
// Address size.
|
||||
0x08,
|
||||
// Segment size.
|
||||
0x04,
|
||||
// Length to here = 12, tuple length = 20.
|
||||
0x00,
|
||||
// Length to here = 12, tuple length = 16.
|
||||
// Padding to tuple length multiple = 4.
|
||||
0x10, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00,
|
||||
|
||||
// Dummy arange tuple data.
|
||||
0x20, 0x00, 0x00, 0x00,
|
||||
@@ -472,9 +439,8 @@ mod tests {
|
||||
version: 2,
|
||||
address_size: 8,
|
||||
},
|
||||
length: 0x20,
|
||||
length: 0x1c,
|
||||
debug_info_offset: DebugInfoOffset(0x0403_0201),
|
||||
segment_size: 4,
|
||||
entries: EndianSlice::new(&buf[buf.len() - 32..buf.len() - 16], LittleEndian),
|
||||
}
|
||||
);
|
||||
@@ -493,7 +459,7 @@ mod tests {
|
||||
// Address size.
|
||||
0xff,
|
||||
// Segment size.
|
||||
0xff,
|
||||
0x00,
|
||||
// Length to here = 12, tuple length = 20.
|
||||
// Padding to tuple length multiple = 4.
|
||||
0x10, 0x00, 0x00, 0x00,
|
||||
@@ -516,7 +482,7 @@ mod tests {
|
||||
|
||||
let error = ArangeHeader::parse(rest, DebugArangesOffset(0x10))
|
||||
.expect_err("should fail to parse header");
|
||||
assert_eq!(error, Error::InvalidAddressRange);
|
||||
assert_eq!(error, Error::UnsupportedAddressSize(0xff));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -556,7 +522,7 @@ mod tests {
|
||||
|
||||
let error = ArangeHeader::parse(rest, DebugArangesOffset(0x10))
|
||||
.expect_err("should fail to parse header");
|
||||
assert_eq!(error, Error::InvalidAddressRange);
|
||||
assert_eq!(error, Error::UnsupportedAddressSize(0));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -566,50 +532,17 @@ mod tests {
|
||||
version: 2,
|
||||
address_size: 4,
|
||||
};
|
||||
let segment_size = 0;
|
||||
let buf = [0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09];
|
||||
let rest = &mut EndianSlice::new(&buf, LittleEndian);
|
||||
let entry =
|
||||
ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok");
|
||||
let entry = ArangeEntry::parse(rest, encoding).expect("should parse entry ok");
|
||||
assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
|
||||
assert_eq!(
|
||||
entry,
|
||||
Some(ArangeEntry {
|
||||
segment: None,
|
||||
address: 0x0403_0201,
|
||||
length: 0x0807_0605,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_entry_segment() {
|
||||
let encoding = Encoding {
|
||||
format: Format::Dwarf32,
|
||||
version: 2,
|
||||
address_size: 4,
|
||||
};
|
||||
let segment_size = 8;
|
||||
#[rustfmt::skip]
|
||||
let buf = [
|
||||
// Segment.
|
||||
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
|
||||
// Address.
|
||||
0x01, 0x02, 0x03, 0x04,
|
||||
// Length.
|
||||
0x05, 0x06, 0x07, 0x08,
|
||||
// Next tuple.
|
||||
0x09
|
||||
];
|
||||
let rest = &mut EndianSlice::new(&buf, LittleEndian);
|
||||
let entry =
|
||||
ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok");
|
||||
assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
|
||||
assert_eq!(
|
||||
entry,
|
||||
Some(ArangeEntry {
|
||||
segment: Some(0x1817_1615_1413_1211),
|
||||
address: 0x0403_0201,
|
||||
range: Range {
|
||||
begin: 0x0403_0201,
|
||||
end: 0x0403_0201 + 0x0807_0605,
|
||||
},
|
||||
length: 0x0807_0605,
|
||||
})
|
||||
);
|
||||
@@ -622,7 +555,6 @@ mod tests {
|
||||
version: 2,
|
||||
address_size: 4,
|
||||
};
|
||||
let segment_size = 0;
|
||||
#[rustfmt::skip]
|
||||
let buf = [
|
||||
// Zero tuple.
|
||||
@@ -635,16 +567,61 @@ mod tests {
|
||||
0x09
|
||||
];
|
||||
let rest = &mut EndianSlice::new(&buf, LittleEndian);
|
||||
let entry =
|
||||
ArangeEntry::parse(rest, encoding, segment_size).expect("should parse entry ok");
|
||||
let entry = ArangeEntry::parse(rest, encoding).expect("should parse entry ok");
|
||||
assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
|
||||
assert_eq!(
|
||||
entry,
|
||||
Some(ArangeEntry {
|
||||
segment: None,
|
||||
address: 0x0403_0201,
|
||||
range: Range {
|
||||
begin: 0x0403_0201,
|
||||
end: 0x0403_0201 + 0x0807_0605,
|
||||
},
|
||||
length: 0x0807_0605,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_entry_overflow_32() {
|
||||
let encoding = Encoding {
|
||||
format: Format::Dwarf32,
|
||||
version: 2,
|
||||
address_size: 4,
|
||||
};
|
||||
#[rustfmt::skip]
|
||||
let buf = [
|
||||
// Address.
|
||||
0x01, 0x02, 0x03, 0x84,
|
||||
// Length.
|
||||
0x05, 0x06, 0x07, 0x88,
|
||||
// Next tuple.
|
||||
0x09
|
||||
];
|
||||
let rest = &mut EndianSlice::new(&buf, LittleEndian);
|
||||
let entry = ArangeEntry::parse(rest, encoding);
|
||||
assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
|
||||
assert_eq!(entry, Err(Error::AddressOverflow));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_entry_overflow_64() {
|
||||
let encoding = Encoding {
|
||||
format: Format::Dwarf32,
|
||||
version: 2,
|
||||
address_size: 8,
|
||||
};
|
||||
#[rustfmt::skip]
|
||||
let buf = [
|
||||
// Address.
|
||||
0x01, 0x02, 0x03, 0x04, 0x00, 0x00, 0x00, 0x80,
|
||||
// Length.
|
||||
0x05, 0x06, 0x07, 0x08, 0x00, 0x00, 0x00, 0x80,
|
||||
// Next tuple.
|
||||
0x09
|
||||
];
|
||||
let rest = &mut EndianSlice::new(&buf, LittleEndian);
|
||||
let entry = ArangeEntry::parse(rest, encoding);
|
||||
assert_eq!(*rest, EndianSlice::new(&buf[buf.len() - 1..], LittleEndian));
|
||||
assert_eq!(entry, Err(Error::AddressOverflow));
|
||||
}
|
||||
}
|
||||
|
||||
445
third_party/rust/gimli/src/read/cfi.rs
vendored
File diff suppressed because it is too large
2
third_party/rust/gimli/src/read/dwarf.rs
vendored
@@ -739,7 +739,7 @@ impl<R: Clone> Dwarf<R> {
|
||||
// parent file.
|
||||
self.ranges
|
||||
.set_debug_ranges(parent.ranges.debug_ranges().clone());
|
||||
self.sup = parent.sup.clone();
|
||||
self.sup.clone_from(&parent.sup);
|
||||
}
|
||||
}
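The change from `self.sup = parent.sup.clone()` to `self.sup.clone_from(&parent.sup)` is the usual `clone_from` idiom: the destination may reuse resources it already owns instead of building a fresh clone and then dropping the old value. A standalone illustration with `Vec` (illustration only; the actual field is gimli's supplementary `Dwarf`):

fn main() {
    let parent: Vec<u8> = vec![1, 2, 3];
    let mut child: Vec<u8> = Vec::with_capacity(16);
    child.clone_from(&parent); // may reuse child's existing buffer
    assert_eq!(child, parent);
}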
|
||||
|
||||
|
||||
14
third_party/rust/gimli/src/read/endian_reader.rs
vendored
@@ -5,6 +5,7 @@ use alloc::rc::Rc;
|
||||
use alloc::string::String;
|
||||
use alloc::sync::Arc;
|
||||
use core::fmt::Debug;
|
||||
use core::hash::{Hash, Hasher};
|
||||
use core::ops::{Deref, Index, Range, RangeFrom, RangeTo};
|
||||
use core::slice;
|
||||
use core::str;
|
||||
@@ -116,7 +117,7 @@ pub type EndianArcSlice<Endian> = EndianReader<Endian, Arc<[u8]>>;
|
||||
/// pub type MmapFileReader<Endian> = gimli::EndianReader<Endian, ArcMmapFile>;
|
||||
/// # fn test(_: &MmapFileReader<gimli::NativeEndian>) { }
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Copy, Hash)]
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct EndianReader<Endian, T>
|
||||
where
|
||||
Endian: Endianity,
|
||||
@@ -144,6 +145,17 @@ where
|
||||
{
|
||||
}
|
||||
|
||||
impl<Endian, T> Hash for EndianReader<Endian, T>
|
||||
where
|
||||
Endian: Endianity,
|
||||
T: CloneStableDeref<Target = [u8]> + Debug,
|
||||
{
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
// This must match the `PartialEq` implementation.
|
||||
self.bytes().hash(state);
|
||||
}
|
||||
}
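The derived `Hash` was replaced with this manual impl so that hashing stays consistent with the existing `PartialEq`, which compares the underlying bytes; the comment above states the invariant. A standalone sketch of that invariant with illustrative names (not the vendored `EndianReader` type):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::sync::Arc;

struct BytesReader {
    storage: Arc<[u8]>, // different allocations may hold equal bytes
}

impl PartialEq for BytesReader {
    fn eq(&self, other: &Self) -> bool {
        self.storage[..] == other.storage[..]
    }
}
impl Eq for BytesReader {}

impl Hash for BytesReader {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Must match PartialEq: hash the bytes, not the owning pointer.
        self.storage[..].hash(state);
    }
}

fn hash_of<T: Hash>(value: &T) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let a = BytesReader { storage: Arc::from(&b"abc"[..]) };
    let b = BytesReader { storage: Arc::from(&b"abc"[..]) };
    assert!(a == b);
    assert_eq!(hash_of(&a), hash_of(&b)); // equal values hash equally
}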
|
||||
|
||||
// This is separated out from `EndianReader` so that we can avoid running afoul
|
||||
// of borrowck. We need to `read_slice(&mut self, ...) -> &[u8]` and then call
|
||||
// `self.endian.read_whatever` on the result. The problem is that the returned
|
||||
|
||||
14
third_party/rust/gimli/src/read/index.rs
vendored
@@ -363,9 +363,9 @@ pub enum IndexSectionId {
|
||||
}
|
||||
|
||||
impl IndexSectionId {
|
||||
/// Returns the ELF section name for this kind, when found in a .dwo or .dwp file.
|
||||
pub fn dwo_name(self) -> &'static str {
|
||||
let section_id = match self {
|
||||
/// Returns the corresponding `SectionId`.
|
||||
pub fn section_id(self) -> SectionId {
|
||||
match self {
|
||||
IndexSectionId::DebugAbbrev => SectionId::DebugAbbrev,
|
||||
IndexSectionId::DebugInfo => SectionId::DebugInfo,
|
||||
IndexSectionId::DebugLine => SectionId::DebugLine,
|
||||
@@ -376,8 +376,12 @@ impl IndexSectionId {
|
||||
IndexSectionId::DebugRngLists => SectionId::DebugRngLists,
|
||||
IndexSectionId::DebugStrOffsets => SectionId::DebugStrOffsets,
|
||||
IndexSectionId::DebugTypes => SectionId::DebugTypes,
|
||||
};
|
||||
section_id.dwo_name().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the ELF section name for this kind, when found in a .dwo or .dwp file.
|
||||
pub fn dwo_name(self) -> &'static str {
|
||||
self.section_id().dwo_name().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
261
third_party/rust/gimli/src/read/line.rs
vendored
@@ -1,7 +1,5 @@
|
||||
use alloc::vec::Vec;
|
||||
use core::fmt;
|
||||
use core::num::{NonZeroU64, Wrapping};
|
||||
use core::result;
|
||||
|
||||
use crate::common::{
|
||||
DebugLineOffset, DebugLineStrOffset, DebugStrOffset, DebugStrOffsetsIndex, Encoding, Format,
|
||||
@@ -9,7 +7,9 @@ use crate::common::{
|
||||
};
|
||||
use crate::constants;
|
||||
use crate::endianity::Endianity;
|
||||
use crate::read::{AttributeValue, EndianSlice, Error, Reader, ReaderOffset, Result, Section};
|
||||
use crate::read::{
|
||||
AttributeValue, EndianSlice, Error, Reader, ReaderAddress, ReaderOffset, Result, Section,
|
||||
};
|
||||
|
||||
/// The `DebugLine` struct contains the source location to instruction mapping
|
||||
/// found in the `.debug_line` section.
|
||||
@@ -240,7 +240,7 @@ where
|
||||
Err(err) => return Err(err),
|
||||
Ok(None) => return Ok(None),
|
||||
Ok(Some(instruction)) => {
|
||||
if self.row.execute(instruction, &mut self.program) {
|
||||
if self.row.execute(instruction, &mut self.program)? {
|
||||
if self.row.tombstone {
|
||||
// Perform any reset that was required for the tombstone row.
|
||||
// Normally this is done when `next_row` is called again, but for
|
||||
@@ -516,58 +516,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<R, Offset> fmt::Display for LineInstruction<R, Offset>
|
||||
where
|
||||
R: Reader<Offset = Offset>,
|
||||
Offset: ReaderOffset,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> {
|
||||
match *self {
|
||||
LineInstruction::Special(opcode) => write!(f, "Special opcode {}", opcode),
|
||||
LineInstruction::Copy => write!(f, "{}", constants::DW_LNS_copy),
|
||||
LineInstruction::AdvancePc(advance) => {
|
||||
write!(f, "{} by {}", constants::DW_LNS_advance_pc, advance)
|
||||
}
|
||||
LineInstruction::AdvanceLine(increment) => {
|
||||
write!(f, "{} by {}", constants::DW_LNS_advance_line, increment)
|
||||
}
|
||||
LineInstruction::SetFile(file) => {
|
||||
write!(f, "{} to {}", constants::DW_LNS_set_file, file)
|
||||
}
|
||||
LineInstruction::SetColumn(column) => {
|
||||
write!(f, "{} to {}", constants::DW_LNS_set_column, column)
|
||||
}
|
||||
LineInstruction::NegateStatement => write!(f, "{}", constants::DW_LNS_negate_stmt),
|
||||
LineInstruction::SetBasicBlock => write!(f, "{}", constants::DW_LNS_set_basic_block),
|
||||
LineInstruction::ConstAddPc => write!(f, "{}", constants::DW_LNS_const_add_pc),
|
||||
LineInstruction::FixedAddPc(advance) => {
|
||||
write!(f, "{} by {}", constants::DW_LNS_fixed_advance_pc, advance)
|
||||
}
|
||||
LineInstruction::SetPrologueEnd => write!(f, "{}", constants::DW_LNS_set_prologue_end),
|
||||
LineInstruction::SetEpilogueBegin => {
|
||||
write!(f, "{}", constants::DW_LNS_set_epilogue_begin)
|
||||
}
|
||||
LineInstruction::SetIsa(isa) => write!(f, "{} to {}", constants::DW_LNS_set_isa, isa),
|
||||
LineInstruction::UnknownStandard0(opcode) => write!(f, "Unknown {}", opcode),
|
||||
LineInstruction::UnknownStandard1(opcode, arg) => {
|
||||
write!(f, "Unknown {} with operand {}", opcode, arg)
|
||||
}
|
||||
LineInstruction::UnknownStandardN(opcode, ref args) => {
|
||||
write!(f, "Unknown {} with operands {:?}", opcode, args)
|
||||
}
|
||||
LineInstruction::EndSequence => write!(f, "{}", constants::DW_LNE_end_sequence),
|
||||
LineInstruction::SetAddress(address) => {
|
||||
write!(f, "{} to {}", constants::DW_LNE_set_address, address)
|
||||
}
|
||||
LineInstruction::DefineFile(_) => write!(f, "{}", constants::DW_LNE_define_file),
|
||||
LineInstruction::SetDiscriminator(discr) => {
|
||||
write!(f, "{} to {}", constants::DW_LNE_set_discriminator, discr)
|
||||
}
|
||||
LineInstruction::UnknownExtended(opcode, _) => write!(f, "Unknown {}", opcode),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Deprecated. `OpcodesIter` has been renamed to `LineInstructions`.
|
||||
#[deprecated(note = "OpcodesIter has been renamed to LineInstructions, use that instead.")]
|
||||
pub type OpcodesIter<R> = LineInstructions<R>;
|
||||
@@ -631,7 +579,7 @@ pub type LineNumberRow = LineRow;
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
pub struct LineRow {
|
||||
tombstone: bool,
|
||||
address: Wrapping<u64>,
|
||||
address: u64,
|
||||
op_index: Wrapping<u64>,
|
||||
file: u64,
|
||||
line: Wrapping<u64>,
|
||||
@@ -652,7 +600,7 @@ impl LineRow {
|
||||
// "At the beginning of each sequence within a line number program, the
|
||||
// state of the registers is:" -- Section 6.2.2
|
||||
tombstone: false,
|
||||
address: Wrapping(0),
|
||||
address: 0,
|
||||
op_index: Wrapping(0),
|
||||
file: 1,
|
||||
line: Wrapping(1),
|
||||
@@ -676,7 +624,7 @@ impl LineRow {
|
||||
/// generated by the compiler."
|
||||
#[inline]
|
||||
pub fn address(&self) -> u64 {
|
||||
self.address.0
|
||||
self.address
|
||||
}
|
||||
|
||||
/// > An unsigned integer representing the index of an operation within a VLIW
|
||||
@@ -800,21 +748,21 @@ impl LineRow {
|
||||
&mut self,
|
||||
instruction: LineInstruction<R>,
|
||||
program: &mut Program,
|
||||
) -> bool
|
||||
) -> Result<bool>
|
||||
where
|
||||
Program: LineProgram<R>,
|
||||
R: Reader,
|
||||
{
|
||||
match instruction {
|
||||
Ok(match instruction {
|
||||
LineInstruction::Special(opcode) => {
|
||||
self.exec_special_opcode(opcode, program.header());
|
||||
self.exec_special_opcode(opcode, program.header())?;
|
||||
true
|
||||
}
|
||||
|
||||
LineInstruction::Copy => true,
|
||||
|
||||
LineInstruction::AdvancePc(operation_advance) => {
|
||||
self.apply_operation_advance(operation_advance, program.header());
|
||||
self.apply_operation_advance(operation_advance, program.header())?;
|
||||
false
|
||||
}
|
||||
|
||||
@@ -846,13 +794,16 @@ impl LineRow {
|
||||
LineInstruction::ConstAddPc => {
|
||||
let adjusted = self.adjust_opcode(255, program.header());
|
||||
let operation_advance = adjusted / program.header().line_encoding.line_range;
|
||||
self.apply_operation_advance(u64::from(operation_advance), program.header());
|
||||
self.apply_operation_advance(u64::from(operation_advance), program.header())?;
|
||||
false
|
||||
}
|
||||
|
||||
LineInstruction::FixedAddPc(operand) => {
|
||||
self.address += Wrapping(u64::from(operand));
|
||||
if !self.tombstone {
|
||||
let address_size = program.header().address_size();
|
||||
self.address = self.address.add_sized(u64::from(operand), address_size)?;
|
||||
self.op_index.0 = 0;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
@@ -879,8 +830,13 @@ impl LineRow {
|
||||
LineInstruction::SetAddress(address) => {
|
||||
let tombstone_address = !0 >> (64 - program.header().encoding.address_size * 8);
|
||||
self.tombstone = address == tombstone_address;
|
||||
self.address.0 = address;
|
||||
if !self.tombstone {
|
||||
if address < self.address {
|
||||
return Err(Error::InvalidAddressRange);
|
||||
}
|
||||
self.address = address;
|
||||
self.op_index.0 = 0;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
@@ -899,7 +855,7 @@ impl LineRow {
|
||||
| LineInstruction::UnknownStandard1(_, _)
|
||||
| LineInstruction::UnknownStandardN(_, _)
|
||||
| LineInstruction::UnknownExtended(_, _) => false,
|
||||
}
|
||||
})
|
||||
}
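Within the reworked `execute`, `DW_LNE_set_address` now rejects an address that moves backwards in the sequence (unless the row is a tombstone), which is what the new `test_exec_set_address_backwards` below exercises. A standalone sketch of just that check (illustrative; the vendored code also handles the tombstone address and resets `op_index`):

fn set_address(current: u64, new: u64) -> Result<u64, &'static str> {
    if new < current {
        // Mirrors Error::InvalidAddressRange in gimli.
        return Err("DW_LNE_set_address went backwards");
    }
    Ok(new)
}

fn main() {
    assert_eq!(set_address(0, 3030), Ok(3030));
    assert!(set_address(1, 0).is_err()); // mirrors test_exec_set_address_backwards
}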
|
||||
|
||||
/// Perform any reset that was required after copying the previous row.
|
||||
@@ -940,7 +896,11 @@ impl LineRow {
|
||||
&mut self,
|
||||
operation_advance: u64,
|
||||
header: &LineProgramHeader<R>,
|
||||
) {
|
||||
) -> Result<()> {
|
||||
if self.tombstone {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let operation_advance = Wrapping(operation_advance);
|
||||
|
||||
let minimum_instruction_length = u64::from(header.line_encoding.minimum_instruction_length);
|
||||
@@ -950,15 +910,19 @@ impl LineRow {
|
||||
u64::from(header.line_encoding.maximum_operations_per_instruction);
|
||||
let maximum_operations_per_instruction = Wrapping(maximum_operations_per_instruction);
|
||||
|
||||
if maximum_operations_per_instruction.0 == 1 {
|
||||
self.address += minimum_instruction_length * operation_advance;
|
||||
let address_advance = if maximum_operations_per_instruction.0 == 1 {
|
||||
self.op_index.0 = 0;
|
||||
minimum_instruction_length * operation_advance
|
||||
} else {
|
||||
let op_index_with_advance = self.op_index + operation_advance;
|
||||
self.address += minimum_instruction_length
|
||||
* (op_index_with_advance / maximum_operations_per_instruction);
|
||||
self.op_index = op_index_with_advance % maximum_operations_per_instruction;
|
||||
}
|
||||
minimum_instruction_length
|
||||
* (op_index_with_advance / maximum_operations_per_instruction)
|
||||
};
|
||||
self.address = self
|
||||
.address
|
||||
.add_sized(address_advance.0, header.address_size())?;
|
||||
Ok(())
|
||||
}
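The refactored `apply_operation_advance` still follows the DWARF line-program advance rule (the address moves by whole instructions, while `op_index` tracks the position inside a VLIW bundle), but the final addition now goes through `add_sized` so overflow is reported. A standalone sketch of that rule, with `checked_add` standing in for `add_sized` (illustration only, not the vendored code):

fn apply_operation_advance(
    address: u64,
    op_index: u64,
    operation_advance: u64,
    minimum_instruction_length: u64,
    maximum_operations_per_instruction: u64,
) -> Option<(u64, u64)> {
    let (address_advance, new_op_index) = if maximum_operations_per_instruction == 1 {
        // Simple targets: every operation is a whole instruction.
        (minimum_instruction_length * operation_advance, 0)
    } else {
        // VLIW targets: only whole bundles advance the address; the remainder
        // becomes the new op_index.
        let with_advance = op_index + operation_advance;
        (
            minimum_instruction_length * (with_advance / maximum_operations_per_instruction),
            with_advance % maximum_operations_per_instruction,
        )
    };
    // checked_add stands in for `add_sized`, which also enforces the address size.
    Some((address.checked_add(address_advance)?, new_op_index))
}

fn main() {
    // One-op instructions of length 4: advancing 3 operations moves 12 bytes.
    assert_eq!(apply_operation_advance(0x1000, 0, 3, 4, 1), Some((0x100c, 0)));
    // Bundles of 2 operations: from op_index 1, advancing 3 crosses 2 bundles.
    assert_eq!(apply_operation_advance(0x1000, 1, 3, 4, 2), Some((0x1008, 0)));
    // Overflow is now reported instead of silently wrapping.
    assert_eq!(apply_operation_advance(u64::MAX, 0, 1, 1, 1), None);
}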
|
||||
|
||||
#[inline]
|
||||
@@ -967,7 +931,11 @@ impl LineRow {
|
||||
}
|
||||
|
||||
/// Section 6.2.5.1
|
||||
fn exec_special_opcode<R: Reader>(&mut self, opcode: u8, header: &LineProgramHeader<R>) {
|
||||
fn exec_special_opcode<R: Reader>(
|
||||
&mut self,
|
||||
opcode: u8,
|
||||
header: &LineProgramHeader<R>,
|
||||
) -> Result<()> {
|
||||
let adjusted_opcode = self.adjust_opcode(opcode, header);
|
||||
|
||||
let line_range = header.line_encoding.line_range;
|
||||
@@ -979,7 +947,8 @@ impl LineRow {
|
||||
self.apply_line_advance(line_base + i64::from(line_advance));
|
||||
|
||||
// Step 2
|
||||
self.apply_operation_advance(u64::from(operation_advance), header);
|
||||
self.apply_operation_advance(u64::from(operation_advance), header)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1224,6 +1193,13 @@ where
|
||||
.any(|x| x.content_type == constants::DW_LNCT_MD5)
|
||||
}
|
||||
|
||||
/// Return true if the file name entry format contains a source field.
|
||||
pub fn file_has_source(&self) -> bool {
|
||||
self.file_name_entry_format
|
||||
.iter()
|
||||
.any(|x| x.content_type == constants::DW_LNCT_LLVM_source)
|
||||
}
|
||||
|
||||
/// Get the list of source files that appear in this header's line program.
|
||||
pub fn file_names(&self) -> &[FileEntry<R, Offset>] {
|
||||
&self.file_names[..]
|
||||
@@ -1293,7 +1269,7 @@ where
|
||||
}
|
||||
|
||||
if version >= 5 {
|
||||
address_size = rest.read_u8()?;
|
||||
address_size = rest.read_address_size()?;
|
||||
let segment_selector_size = rest.read_u8()?;
|
||||
if segment_selector_size != 0 {
|
||||
return Err(Error::UnsupportedSegmentSize);
|
||||
@@ -1380,6 +1356,7 @@ where
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
});
|
||||
|
||||
file_name_entry_format = Vec::new();
|
||||
@@ -1579,6 +1556,7 @@ where
|
||||
timestamp: u64,
|
||||
size: u64,
|
||||
md5: [u8; 16],
|
||||
source: Option<AttributeValue<R, Offset>>,
|
||||
}
|
||||
|
||||
impl<R, Offset> FileEntry<R, Offset>
|
||||
@@ -1598,6 +1576,7 @@ where
|
||||
timestamp,
|
||||
size,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
};
|
||||
|
||||
Ok(entry)
|
||||
@@ -1667,6 +1646,16 @@ where
|
||||
pub fn md5(&self) -> &[u8; 16] {
|
||||
&self.md5
|
||||
}
|
||||
|
||||
/// The source code of this file. (UTF-8 source text string with "\n" line
|
||||
/// endings).
|
||||
///
|
||||
/// Note: For DWARF v5 files this may return an empty attribute that
|
||||
/// indicates that no source code is available, which this function
|
||||
/// represents as Some(<zero-length attr>).
|
||||
pub fn source(&self) -> Option<AttributeValue<R, Offset>> {
|
||||
self.source.clone()
|
||||
}
|
||||
}
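The doc comment above distinguishes "no `DW_LNCT_LLVM_source` attribute at all" (`None`) from "an empty attribute recorded by the producer", which also means no source is available. A small self-contained sketch of that decision, with the attribute already resolved to raw bytes (illustrative helper, not part of the gimli API):

fn embedded_source(source: Option<&[u8]>) -> Option<&str> {
    match source {
        // An empty DW_LNCT_LLVM_source attribute means "no source available".
        Some(bytes) if !bytes.is_empty() => std::str::from_utf8(bytes).ok(),
        _ => None,
    }
}

fn main() {
    assert_eq!(
        embedded_source(Some(&b"fn main() {}\n"[..])),
        Some("fn main() {}\n")
    );
    assert_eq!(embedded_source(Some(&b""[..])), None); // empty attribute: source deliberately absent
    assert_eq!(embedded_source(None), None); // no attribute emitted at all
}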
|
||||
|
||||
/// The format of a component of an include directory or file name entry.
|
||||
@@ -1686,8 +1675,8 @@ impl FileEntryFormat {
|
||||
let mut path_count = 0;
|
||||
for _ in 0..format_count {
|
||||
let content_type = input.read_uleb128()?;
|
||||
let content_type = if content_type > u64::from(u16::max_value()) {
|
||||
constants::DwLnct(u16::max_value())
|
||||
let content_type = if content_type > u64::from(u16::MAX) {
|
||||
constants::DwLnct(u16::MAX)
|
||||
} else {
|
||||
constants::DwLnct(content_type as u16)
|
||||
};
|
||||
@@ -1733,6 +1722,7 @@ fn parse_file_v5<R: Reader>(
|
||||
let mut timestamp = 0;
|
||||
let mut size = 0;
|
||||
let mut md5 = [0; 16];
|
||||
let mut source = None;
|
||||
|
||||
for format in formats {
|
||||
let value = parse_attribute(input, encoding, format.form)?;
|
||||
@@ -1760,6 +1750,9 @@ fn parse_file_v5<R: Reader>(
|
||||
}
|
||||
}
|
||||
}
|
||||
constants::DW_LNCT_LLVM_source => {
|
||||
source = Some(value);
|
||||
}
|
||||
// Ignore unknown content types.
|
||||
_ => {}
|
||||
}
|
||||
@@ -1771,6 +1764,7 @@ fn parse_file_v5<R: Reader>(
|
||||
timestamp,
|
||||
size,
|
||||
md5,
|
||||
source,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -1886,8 +1880,6 @@ mod tests {
|
||||
use crate::endianity::LittleEndian;
|
||||
use crate::read::{EndianSlice, Error};
|
||||
use crate::test_util::GimliSectionMethods;
|
||||
use core::u64;
|
||||
use core::u8;
|
||||
use test_assembler::{Endian, Label, LabelMaker, Section};
|
||||
|
||||
#[test]
|
||||
@@ -1986,6 +1978,7 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
},
|
||||
FileEntry {
|
||||
path_name: AttributeValue::String(EndianSlice::new(b"bar.h", LittleEndian)),
|
||||
@@ -1993,6 +1986,7 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
},
|
||||
];
|
||||
assert_eq!(header.file_names(), &expected_file_names);
|
||||
@@ -2151,6 +2145,7 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
},
|
||||
FileEntry {
|
||||
path_name: AttributeValue::String(EndianSlice::new(b"bar.rs", LittleEndian)),
|
||||
@@ -2158,6 +2153,7 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
},
|
||||
],
|
||||
include_directories: vec![],
|
||||
@@ -2404,6 +2400,7 @@ mod tests {
|
||||
timestamp: 1,
|
||||
size: 2,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
}),
|
||||
);
|
||||
|
||||
@@ -2427,6 +2424,7 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
};
|
||||
|
||||
let mut header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
@@ -2451,7 +2449,7 @@ mod tests {
|
||||
let mut program = IncompleteLineProgram { header };
|
||||
let is_new_row = registers.execute(opcode, &mut program);
|
||||
|
||||
assert_eq!(is_new_row, expect_new_row);
|
||||
assert_eq!(is_new_row, Ok(expect_new_row));
|
||||
assert_eq!(registers, expected_registers);
|
||||
}
|
||||
|
||||
@@ -2504,7 +2502,7 @@ mod tests {
|
||||
let opcode = LineInstruction::Special(52);
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 += 3;
|
||||
expected_registers.address += 3;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, true);
|
||||
}
|
||||
@@ -2518,7 +2516,7 @@ mod tests {
|
||||
let opcode = LineInstruction::Special(55);
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 += 3;
|
||||
expected_registers.address += 3;
|
||||
expected_registers.line.0 += 3;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, true);
|
||||
@@ -2534,7 +2532,7 @@ mod tests {
|
||||
let opcode = LineInstruction::Special(49);
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 += 3;
|
||||
expected_registers.address += 3;
|
||||
expected_registers.line.0 -= 3;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, true);
|
||||
@@ -2563,7 +2561,7 @@ mod tests {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
|
||||
let mut initial_registers = LineRow::new(&header);
|
||||
initial_registers.address.0 = 1337;
|
||||
initial_registers.address = 1337;
|
||||
initial_registers.line.0 = 42;
|
||||
|
||||
let opcode = LineInstruction::Copy;
|
||||
@@ -2580,23 +2578,33 @@ mod tests {
|
||||
let opcode = LineInstruction::AdvancePc(42);
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 += 42;
|
||||
expected_registers.address += 42;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_advance_pc_overflow() {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
fn test_exec_advance_pc_overflow_32() {
|
||||
let mut header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
header.encoding.address_size = 4;
|
||||
let mut registers = LineRow::new(&header);
|
||||
registers.address = u32::MAX.into();
|
||||
let opcode = LineInstruction::AdvancePc(42);
|
||||
let mut program = IncompleteLineProgram { header };
|
||||
let result = registers.execute(opcode, &mut program);
|
||||
assert_eq!(result, Err(Error::AddressOverflow));
|
||||
}
|
||||
|
||||
let mut initial_registers = LineRow::new(&header);
|
||||
initial_registers.address.0 = u64::MAX;
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 = 41;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false);
|
||||
#[test]
|
||||
fn test_exec_advance_pc_overflow_64() {
|
||||
let mut header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
header.encoding.address_size = 8;
|
||||
let mut registers = LineRow::new(&header);
|
||||
registers.address = u64::MAX;
|
||||
let opcode = LineInstruction::AdvancePc(42);
|
||||
let mut program = IncompleteLineProgram { header };
|
||||
let result = registers.execute(opcode, &mut program);
|
||||
assert_eq!(result, Err(Error::AddressOverflow));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -2729,11 +2737,22 @@ mod tests {
|
||||
let opcode = LineInstruction::ConstAddPc;
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 += 20;
|
||||
expected_registers.address += 20;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_const_add_pc_overflow() {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
let mut registers = LineRow::new(&header);
|
||||
registers.address = u64::MAX;
|
||||
let opcode = LineInstruction::ConstAddPc;
|
||||
let mut program = IncompleteLineProgram { header };
|
||||
let result = registers.execute(opcode, &mut program);
|
||||
assert_eq!(result, Err(Error::AddressOverflow));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_fixed_add_pc() {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
@@ -2744,12 +2763,24 @@ mod tests {
|
||||
let opcode = LineInstruction::FixedAddPc(10);
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 += 10;
|
||||
expected_registers.address += 10;
|
||||
expected_registers.op_index.0 = 0;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_fixed_add_pc_overflow() {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
let mut registers = LineRow::new(&header);
|
||||
registers.address = u64::MAX;
|
||||
registers.op_index.0 = 1;
|
||||
let opcode = LineInstruction::FixedAddPc(10);
|
||||
let mut program = IncompleteLineProgram { header };
|
||||
let result = registers.execute(opcode, &mut program);
|
||||
assert_eq!(result, Err(Error::AddressOverflow));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_set_prologue_end() {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
@@ -2826,7 +2857,7 @@ mod tests {
|
||||
let opcode = LineInstruction::SetAddress(3030);
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.address.0 = 3030;
|
||||
expected_registers.address = 3030;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false);
|
||||
}
|
||||
@@ -2839,11 +2870,22 @@ mod tests {
|
||||
|
||||
let mut expected_registers = initial_registers;
|
||||
expected_registers.tombstone = true;
|
||||
expected_registers.address.0 = !0;
|
||||
|
||||
assert_exec_opcode(header, initial_registers, opcode, expected_registers, false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_set_address_backwards() {
|
||||
let header = make_test_header(EndianSlice::new(&[], LittleEndian));
|
||||
let mut registers = LineRow::new(&header);
|
||||
registers.address = 1;
|
||||
let opcode = LineInstruction::SetAddress(0);
|
||||
|
||||
let mut program = IncompleteLineProgram { header };
|
||||
let result = registers.execute(opcode, &mut program);
|
||||
assert_eq!(result, Err(Error::InvalidAddressRange));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_exec_define_file() {
|
||||
let mut program = make_test_program(EndianSlice::new(&[], LittleEndian));
|
||||
@@ -2855,10 +2897,11 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [0; 16],
|
||||
source: None,
|
||||
};
|
||||
|
||||
let opcode = LineInstruction::DefineFile(file);
|
||||
let is_new_row = row.execute(opcode, &mut program);
|
||||
let is_new_row = row.execute(opcode, &mut program).unwrap();
|
||||
|
||||
assert!(!is_new_row);
|
||||
assert_eq!(Some(&file), program.header().file_names.last());
|
||||
@@ -2916,6 +2959,10 @@ mod tests {
|
||||
timestamp: 0,
|
||||
size: 0,
|
||||
md5: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
|
||||
source: Some(AttributeValue::String(EndianSlice::new(
|
||||
b"foobar",
|
||||
LittleEndian,
|
||||
))),
|
||||
},
|
||||
FileEntry {
|
||||
path_name: AttributeValue::String(EndianSlice::new(b"file2", LittleEndian)),
|
||||
@@ -2925,6 +2972,10 @@ mod tests {
|
||||
md5: [
|
||||
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
|
||||
],
|
||||
source: Some(AttributeValue::String(EndianSlice::new(
|
||||
b"quux",
|
||||
LittleEndian,
|
||||
))),
|
||||
},
|
||||
];
|
||||
|
||||
@@ -2967,21 +3018,25 @@ mod tests {
|
||||
.append_bytes(b"dir1\0")
|
||||
.append_bytes(b"dir2\0")
|
||||
// File entry format count.
|
||||
.D8(3)
|
||||
.D8(4)
|
||||
.uleb(constants::DW_LNCT_path.0 as u64)
|
||||
.uleb(constants::DW_FORM_string.0 as u64)
|
||||
.uleb(constants::DW_LNCT_directory_index.0 as u64)
|
||||
.uleb(constants::DW_FORM_data1.0 as u64)
|
||||
.uleb(constants::DW_LNCT_MD5.0 as u64)
|
||||
.uleb(constants::DW_FORM_data16.0 as u64)
|
||||
.uleb(constants::DW_LNCT_LLVM_source.0 as u64)
|
||||
.uleb(constants::DW_FORM_string.0 as u64)
|
||||
// File count.
|
||||
.D8(2)
|
||||
.append_bytes(b"file1\0")
|
||||
.D8(0)
|
||||
.append_bytes(&expected_file_names[0].md5)
|
||||
.append_bytes(b"foobar\0")
|
||||
.append_bytes(b"file2\0")
|
||||
.D8(1)
|
||||
.append_bytes(&expected_file_names[1].md5)
|
||||
.append_bytes(b"quux\0")
|
||||
.mark(&header_end)
|
||||
// Dummy line program data.
|
||||
.append_bytes(expected_program)
|
||||
@@ -3033,6 +3088,10 @@ mod tests {
|
||||
FileEntryFormat {
|
||||
content_type: constants::DW_LNCT_MD5,
|
||||
form: constants::DW_FORM_data16,
|
||||
},
|
||||
FileEntryFormat {
|
||||
content_type: constants::DW_LNCT_LLVM_source,
|
||||
form: constants::DW_FORM_string,
|
||||
}
|
||||
]
|
||||
);
|
||||
|
||||
2
third_party/rust/gimli/src/read/lists.rs
vendored
@@ -49,7 +49,7 @@ fn parse_header<R: Reader>(input: &mut R) -> Result<ListsHeader> {
|
||||
return Err(Error::UnknownVersion(u64::from(version)));
|
||||
}
|
||||
|
||||
let address_size = input.read_u8()?;
|
||||
let address_size = input.read_address_size()?;
|
||||
let segment_selector_size = input.read_u8()?;
|
||||
if segment_selector_size != 0 {
|
||||
return Err(Error::UnsupportedSegmentSize);
|
||||
|
||||
15
third_party/rust/gimli/src/read/loclists.rs
vendored
@@ -6,7 +6,7 @@ use crate::constants;
|
||||
use crate::endianity::Endianity;
|
||||
use crate::read::{
|
||||
lists::ListsHeader, DebugAddr, EndianSlice, Error, Expression, Range, RawRange, Reader,
|
||||
ReaderOffset, ReaderOffsetId, Result, Section,
|
||||
ReaderAddress, ReaderOffset, ReaderOffsetId, Result, Section,
|
||||
};
|
||||
|
||||
/// The raw contents of the `.debug_loc` section.
|
||||
@@ -593,7 +593,8 @@ impl<R: Reader> LocListIter<R> {
|
||||
&mut self,
|
||||
raw_loc: RawLocListEntry<R>,
|
||||
) -> Result<Option<LocationListEntry<R>>> {
|
||||
let mask = !0 >> (64 - self.raw.encoding.address_size * 8);
|
||||
let address_size = self.raw.encoding.address_size;
|
||||
let mask = u64::ones_sized(address_size);
|
||||
let tombstone = if self.raw.encoding.version <= 4 {
|
||||
mask - 1
|
||||
} else {
|
||||
@@ -620,13 +621,13 @@ impl<R: Reader> LocListIter<R> {
|
||||
data,
|
||||
} => {
|
||||
let begin = self.get_address(begin)?;
|
||||
let end = begin.wrapping_add(length) & mask;
|
||||
let end = begin.wrapping_add_sized(length, address_size);
|
||||
(Range { begin, end }, data)
|
||||
}
|
||||
RawLocListEntry::DefaultLocation { data } => (
|
||||
Range {
|
||||
begin: 0,
|
||||
end: u64::max_value(),
|
||||
end: u64::MAX,
|
||||
},
|
||||
data,
|
||||
),
|
||||
@@ -645,7 +646,7 @@ impl<R: Reader> LocListIter<R> {
|
||||
length,
|
||||
data,
|
||||
} => {
|
||||
let end = begin.wrapping_add(length) & mask;
|
||||
let end = begin.wrapping_add_sized(length, address_size);
|
||||
(Range { begin, end }, data)
|
||||
}
|
||||
};
|
||||
@@ -880,7 +881,7 @@ mod tests {
|
||||
Ok(Some(LocationListEntry {
|
||||
range: Range {
|
||||
begin: 0,
|
||||
end: u64::max_value(),
|
||||
end: u64::MAX,
|
||||
},
|
||||
data: Expression(EndianSlice::new(&[10, 0, 0, 0], LittleEndian)),
|
||||
}))
|
||||
@@ -1144,7 +1145,7 @@ mod tests {
|
||||
Ok(Some(LocationListEntry {
|
||||
range: Range {
|
||||
begin: 0,
|
||||
end: u64::max_value(),
|
||||
end: u64::MAX,
|
||||
},
|
||||
data: Expression(EndianSlice::new(&[10, 0, 0, 0], LittleEndian)),
|
||||
}))
|
||||
|
||||
7
third_party/rust/gimli/src/read/mod.rs
vendored
@@ -275,6 +275,7 @@ pub type EndianBuf<'input, Endian> = EndianSlice<'input, Endian>;
|
||||
|
||||
/// An error that occurred when parsing.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[non_exhaustive]
|
||||
pub enum Error {
|
||||
/// An I/O error occurred while reading.
|
||||
Io,
|
||||
@@ -386,6 +387,11 @@ pub enum Error {
|
||||
UnknownCallFrameInstruction(constants::DwCfa),
|
||||
/// The end of an address range was before the beginning.
|
||||
InvalidAddressRange,
|
||||
/// An address calculation overflowed.
|
||||
///
|
||||
/// This is returned in cases where the address is expected to be
|
||||
/// larger than a previous address, but the calculation overflowed.
|
||||
AddressOverflow,
|
||||
/// Encountered a call frame instruction in a context in which it is not
|
||||
/// valid.
|
||||
CfiInstructionInInvalidContext,
|
||||
@@ -543,6 +549,7 @@ impl Error {
|
||||
Error::InvalidAddressRange => {
|
||||
"The end of an address range must not be before the beginning."
|
||||
}
|
||||
Error::AddressOverflow => "An address calculation overflowed.",
|
||||
Error::CfiInstructionInInvalidContext => {
|
||||
"Encountered a call frame instruction in a context in which it is not valid."
|
||||
}
|
||||
|
||||
1
third_party/rust/gimli/src/read/op.rs
vendored
@@ -2022,7 +2022,6 @@ mod tests {
|
||||
use crate::leb128;
|
||||
use crate::read::{EndianSlice, Error, Result, UnitOffset};
|
||||
use crate::test_util::GimliSectionMethods;
|
||||
use core::usize;
|
||||
use test_assembler::{Endian, Section};
|
||||
|
||||
fn encoding4() -> Encoding {
|
||||
|
||||
52
third_party/rust/gimli/src/read/reader.rs
vendored
@@ -187,6 +187,49 @@ impl ReaderOffset for usize {
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait for addresses within a DWARF section.
|
||||
///
|
||||
/// Currently this is a simple extension trait for `u64`, but it may be expanded
|
||||
/// in the future to support user-defined address types.
|
||||
pub(crate) trait ReaderAddress: Sized {
|
||||
/// Add a length to an address of the given size.
|
||||
///
|
||||
/// Returns an error for overflow.
|
||||
fn add_sized(self, length: u64, size: u8) -> Result<Self>;
|
||||
|
||||
/// Add a length to an address of the given size.
|
||||
///
|
||||
/// Wraps the result to the size of the address to allow for the possibility
|
||||
/// that the length is a negative value.
|
||||
fn wrapping_add_sized(self, length: u64, size: u8) -> Self;
|
||||
|
||||
/// The all-ones value of an address of the given size.
|
||||
fn ones_sized(size: u8) -> Self;
|
||||
}
|
||||
|
||||
impl ReaderAddress for u64 {
|
||||
#[inline]
|
||||
fn add_sized(self, length: u64, size: u8) -> Result<Self> {
|
||||
let address = self.checked_add(length).ok_or(Error::AddressOverflow)?;
|
||||
let mask = Self::ones_sized(size);
|
||||
if address & !mask != 0 {
|
||||
return Err(Error::AddressOverflow);
|
||||
}
|
||||
Ok(address)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn wrapping_add_sized(self, length: u64, size: u8) -> Self {
|
||||
let mask = Self::ones_sized(size);
|
||||
self.wrapping_add(length) & mask
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn ones_sized(size: u8) -> Self {
|
||||
!0 >> (64 - size * 8)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "read"))]
|
||||
pub(crate) mod seal_if_no_alloc {
|
||||
#[derive(Debug)]
|
||||
@@ -452,6 +495,15 @@ pub trait Reader: Debug + Clone {
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a byte and validate it as an address size.
|
||||
fn read_address_size(&mut self) -> Result<u8> {
|
||||
let size = self.read_u8()?;
|
||||
match size {
|
||||
1 | 2 | 4 | 8 => Ok(size),
|
||||
_ => Err(Error::UnsupportedAddressSize(size)),
|
||||
}
|
||||
}
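Validating the size byte up front is what lets the header parsers above return `UnsupportedAddressSize` (see the updated aranges tests) instead of feeding a bogus size such as 0 or 0xff into later mask computations like `!0 >> (64 - size * 8)`, whose shift amount would then be invalid. A trivial standalone sketch of the check:

fn checked_address_size(size: u8) -> Result<u8, u8> {
    match size {
        1 | 2 | 4 | 8 => Ok(size),
        _ => Err(size), // corresponds to Error::UnsupportedAddressSize(size)
    }
}

fn main() {
    assert_eq!(checked_address_size(8), Ok(8));
    assert_eq!(checked_address_size(0), Err(0)); // matches the updated header test
    assert_eq!(checked_address_size(0xff), Err(0xff));
}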
|
||||
|
||||
/// Read an address-sized integer, and return it as a `u64`.
|
||||
fn read_address(&mut self, address_size: u8) -> Result<u64> {
|
||||
match address_size {
|
||||
|
||||
18
third_party/rust/gimli/src/read/rnglists.rs
vendored
@@ -5,8 +5,8 @@ use crate::common::{
|
||||
use crate::constants;
|
||||
use crate::endianity::Endianity;
|
||||
use crate::read::{
|
||||
lists::ListsHeader, DebugAddr, EndianSlice, Error, Reader, ReaderOffset, ReaderOffsetId,
|
||||
Result, Section,
|
||||
lists::ListsHeader, DebugAddr, EndianSlice, Error, Reader, ReaderAddress, ReaderOffset,
|
||||
ReaderOffsetId, Result, Section,
|
||||
};
|
||||
|
||||
/// The raw contents of the `.debug_ranges` section.
|
||||
@@ -527,7 +527,8 @@ impl<R: Reader> RngListIter<R> {
|
||||
/// The raw range should have been obtained from `next_raw`.
|
||||
#[doc(hidden)]
|
||||
pub fn convert_raw(&mut self, raw_range: RawRngListEntry<R::Offset>) -> Result<Option<Range>> {
|
||||
let mask = !0 >> (64 - self.raw.encoding.address_size * 8);
|
||||
let address_size = self.raw.encoding.address_size;
|
||||
let mask = u64::ones_sized(address_size);
|
||||
let tombstone = if self.raw.encoding.version <= 4 {
|
||||
mask - 1
|
||||
} else {
|
||||
@@ -550,7 +551,7 @@ impl<R: Reader> RngListIter<R> {
|
||||
}
|
||||
RawRngListEntry::StartxLength { begin, length } => {
|
||||
let begin = self.get_address(begin)?;
|
||||
let end = begin.wrapping_add(length) & mask;
|
||||
let end = begin.wrapping_add_sized(length, address_size);
|
||||
Range { begin, end }
|
||||
}
|
||||
RawRngListEntry::AddressOrOffsetPair { begin, end }
|
||||
@@ -564,7 +565,7 @@ impl<R: Reader> RngListIter<R> {
|
||||
}
|
||||
RawRngListEntry::StartEnd { begin, end } => Range { begin, end },
|
||||
RawRngListEntry::StartLength { begin, length } => {
|
||||
let end = begin.wrapping_add(length) & mask;
|
||||
let end = begin.wrapping_add_sized(length, address_size);
|
||||
Range { begin, end }
|
||||
}
|
||||
};
|
||||
@@ -624,7 +625,7 @@ impl RawRange {
|
||||
}
|
||||
|
||||
/// An address range from the `.debug_ranges`, `.debug_rnglists`, or `.debug_aranges` sections.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct Range {
|
||||
/// The beginning address of the range.
|
||||
pub begin: u64,
|
||||
@@ -637,9 +638,8 @@ impl Range {
|
||||
/// Add a base address to this range.
|
||||
#[inline]
|
||||
pub(crate) fn add_base_address(&mut self, base_address: u64, address_size: u8) {
|
||||
let mask = !0 >> (64 - address_size * 8);
|
||||
self.begin = base_address.wrapping_add(self.begin) & mask;
|
||||
self.end = base_address.wrapping_add(self.end) & mask;
|
||||
self.begin = base_address.wrapping_add_sized(self.begin, address_size);
|
||||
self.end = base_address.wrapping_add_sized(self.end, address_size);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
25
third_party/rust/gimli/src/read/unit.rs
vendored
@@ -2,7 +2,6 @@
|
||||
|
||||
use core::cell::Cell;
|
||||
use core::ops::{Range, RangeFrom, RangeTo};
|
||||
use core::{u16, u8};
|
||||
|
||||
use crate::common::{
|
||||
DebugAbbrevOffset, DebugAddrBase, DebugAddrIndex, DebugInfoOffset, DebugLineOffset,
|
||||
@@ -575,7 +574,7 @@ where
|
||||
// reader.
|
||||
if 2 <= version && version <= 4 {
|
||||
abbrev_offset = parse_debug_abbrev_offset(&mut rest, format)?;
|
||||
address_size = rest.read_u8()?;
|
||||
address_size = rest.read_address_size()?;
|
||||
// Before DWARF5, all units in the .debug_info section are compilation
|
||||
// units, and all units in the .debug_types section are type units.
|
||||
unit_type = match unit_offset {
|
||||
@@ -584,7 +583,7 @@ where
|
||||
};
|
||||
} else if version == 5 {
|
||||
unit_type = parse_unit_type(&mut rest)?;
|
||||
address_size = rest.read_u8()?;
|
||||
address_size = rest.read_address_size()?;
|
||||
abbrev_offset = parse_debug_abbrev_offset(&mut rest, format)?;
|
||||
} else {
|
||||
return Err(Error::UnknownVersion(u64::from(version)));
|
||||
@@ -1849,7 +1848,7 @@ where
|
||||
AttributeValue::Data8(data) => data as i64,
|
||||
AttributeValue::Sdata(data) => data,
|
||||
AttributeValue::Udata(data) => {
|
||||
if data > i64::max_value() as u64 {
|
||||
if data > i64::MAX as u64 {
|
||||
// Maybe we should emit a warning here
|
||||
return None;
|
||||
}
|
||||
@@ -4197,28 +4196,24 @@ mod tests {
|
||||
)] = &[
|
||||
(AttributeValue::Data1(1), Some(1), Some(1)),
|
||||
(
|
||||
AttributeValue::Data1(core::u8::MAX),
|
||||
Some(u64::from(std::u8::MAX)),
|
||||
AttributeValue::Data1(u8::MAX),
|
||||
Some(u64::from(u8::MAX)),
|
||||
Some(-1),
|
||||
),
|
||||
(AttributeValue::Data2(1), Some(1), Some(1)),
|
||||
(
|
||||
AttributeValue::Data2(core::u16::MAX),
|
||||
Some(u64::from(std::u16::MAX)),
|
||||
AttributeValue::Data2(u16::MAX),
|
||||
Some(u64::from(u16::MAX)),
|
||||
Some(-1),
|
||||
),
|
||||
(AttributeValue::Data4(1), Some(1), Some(1)),
|
||||
(
|
||||
AttributeValue::Data4(core::u32::MAX),
|
||||
Some(u64::from(std::u32::MAX)),
|
||||
AttributeValue::Data4(u32::MAX),
|
||||
Some(u64::from(u32::MAX)),
|
||||
Some(-1),
|
||||
),
|
||||
(AttributeValue::Data8(1), Some(1), Some(1)),
|
||||
(
|
||||
AttributeValue::Data8(core::u64::MAX),
|
||||
Some(core::u64::MAX),
|
||||
Some(-1),
|
||||
),
|
||||
(AttributeValue::Data8(u64::MAX), Some(u64::MAX), Some(-1)),
|
||||
(AttributeValue::Sdata(1), Some(1), Some(1)),
|
||||
(AttributeValue::Sdata(-1), None, Some(-1)),
|
||||
(AttributeValue::Udata(1), Some(1), Some(1)),
|
||||
|
||||
3
third_party/rust/gimli/src/write/cfi.rs
vendored
@@ -212,8 +212,7 @@ impl CommonInformationEntry {
|
||||
|
||||
if encoding.version >= 4 {
|
||||
w.write_u8(encoding.address_size)?;
|
||||
// TODO: segment_selector_size
|
||||
w.write_u8(0)?;
|
||||
w.write_u8(0)?; // segment_selector_size
|
||||
}
|
||||
|
||||
w.write_uleb128(self.code_alignment_factor.into())?;
|
||||
|
||||
75
third_party/rust/gimli/src/write/line.rs
vendored
@@ -67,6 +67,12 @@ pub struct LineProgram {
|
||||
/// For version 5, this controls whether to emit `DW_LNCT_MD5`.
|
||||
pub file_has_md5: bool,
|
||||
|
||||
/// True if the file entries have embedded source code.
|
||||
///
|
||||
/// For version <= 4, this is ignored.
|
||||
/// For version 5, this controls whether to emit `DW_LNCT_LLVM_source`.
|
||||
pub file_has_source: bool,
|
||||
|
||||
prev_row: LineRow,
|
||||
row: LineRow,
|
||||
// TODO: this probably should be either rows or sequences instead
|
||||
@@ -119,6 +125,7 @@ impl LineProgram {
|
||||
file_has_timestamp: false,
|
||||
file_has_size: false,
|
||||
file_has_md5: false,
|
||||
file_has_source: false,
|
||||
};
|
||||
// For all DWARF versions, directory index 0 is comp_dir.
|
||||
// For version <= 4, the entry is implicit. We still add
|
||||
@@ -153,6 +160,7 @@ impl LineProgram {
|
||||
file_has_timestamp: false,
|
||||
file_has_size: false,
|
||||
file_has_md5: false,
|
||||
file_has_source: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -592,7 +600,8 @@ impl LineProgram {
|
||||
let count = 2
|
||||
+ if self.file_has_timestamp { 1 } else { 0 }
|
||||
+ if self.file_has_size { 1 } else { 0 }
|
||||
+ if self.file_has_md5 { 1 } else { 0 };
|
||||
+ if self.file_has_md5 { 1 } else { 0 }
|
||||
+ if self.file_has_source { 1 } else { 0 };
|
||||
w.write_u8(count)?;
|
||||
w.write_uleb128(u64::from(constants::DW_LNCT_path.0))?;
|
||||
let file_form = self.comp_file.0.form();
|
||||
@@ -611,6 +620,10 @@ impl LineProgram {
|
||||
w.write_uleb128(u64::from(constants::DW_LNCT_MD5.0))?;
|
||||
w.write_uleb128(constants::DW_FORM_data16.0.into())?;
|
||||
}
|
||||
if self.file_has_source {
|
||||
w.write_uleb128(u64::from(constants::DW_LNCT_LLVM_source.0))?;
|
||||
w.write_uleb128(constants::DW_FORM_string.0.into())?;
|
||||
}
|
||||
|
||||
// File name entries.
|
||||
w.write_uleb128(self.files.len() as u64 + 1)?;
|
||||
@@ -632,6 +645,20 @@ impl LineProgram {
|
||||
if self.file_has_md5 {
|
||||
w.write(&info.md5)?;
|
||||
}
|
||||
if self.file_has_source {
|
||||
// Note: An empty DW_LNCT_LLVM_source is interpreted as missing
|
||||
// source code. Included source code should always be
|
||||
// terminated by a "\n" line ending.
|
||||
let empty_str = LineString::String(Vec::new());
|
||||
let source = info.source.as_ref().unwrap_or(&empty_str);
|
||||
source.write(
|
||||
w,
|
||||
constants::DW_FORM_string,
|
||||
self.encoding,
|
||||
debug_line_str_offsets,
|
||||
debug_str_offsets,
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
write_file(&self.comp_file.0, DirectoryId(0), &self.comp_file.1)?;
|
||||
@@ -937,7 +964,7 @@ mod id {
|
||||
pub use self::id::*;
|
||||
|
||||
/// Extra information for file in a `LineProgram`.
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq)]
|
||||
pub struct FileInfo {
|
||||
/// The implementation defined timestamp of the last modification of the file,
|
||||
/// or 0 if not available.
|
||||
@@ -950,6 +977,15 @@ pub struct FileInfo {
|
||||
///
|
||||
/// Only used if version >= 5 and `LineProgram::file_has_md5` is `true`.
|
||||
pub md5: [u8; 16],
|
||||
|
||||
/// Optionally some embedded source code.
|
||||
///
|
||||
/// Only used if version >= 5 and `LineProgram::file_has_source` is `true`.
|
||||
///
|
||||
/// NOTE: This currently only supports the `LineString::String` variant,
|
||||
/// since we're encoding the string with `DW_FORM_string`.
|
||||
/// Other variants will result in an `LineStringFormMismatch` error.
|
||||
pub source: Option<LineString>,
|
||||
}
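For producers, the new `source` field is what the `DW_LNCT_LLVM_source` entry format above is written from; it only takes effect when `LineProgram::file_has_source` is set and the version is 5 or later, and the text is expected to end with a "\n". A sketch of filling it in, assuming the vendored `gimli::write` API shown in this diff (built with the crate's `write` feature; the helper name is hypothetical):

use gimli::write::{FileInfo, LineString};

// Hypothetical helper: build a FileInfo carrying embedded source text.
fn file_info_with_source(text: &str) -> FileInfo {
    FileInfo {
        // Only the LineString::String variant is supported for `source`,
        // since it is emitted with DW_FORM_string.
        source: Some(LineString::String(text.as_bytes().to_vec())),
        // timestamp, size and md5 keep their Default values here.
        ..Default::default()
    }
}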
|
||||
|
||||
define_section!(
|
||||
@@ -999,6 +1035,15 @@ mod convert {
|
||||
timestamp: comp_file.timestamp(),
|
||||
size: comp_file.size(),
|
||||
md5: *comp_file.md5(),
|
||||
source: match comp_file.source() {
|
||||
Some(source) => Some(LineString::from(
|
||||
source,
|
||||
dwarf,
|
||||
line_strings,
|
||||
strings,
|
||||
)?),
|
||||
None => None,
|
||||
},
|
||||
}),
|
||||
)
|
||||
}
|
||||
@@ -1040,6 +1085,7 @@ mod convert {
|
||||
program.file_has_timestamp = from_header.file_has_timestamp();
|
||||
program.file_has_size = from_header.file_has_size();
|
||||
program.file_has_md5 = from_header.file_has_md5();
|
||||
program.file_has_source = from_header.file_has_source();
|
||||
for from_file in from_header.file_names().iter().skip(file_skip) {
|
||||
let from_name =
|
||||
LineString::from(from_file.path_name(), dwarf, line_strings, strings)?;
|
||||
@@ -1052,6 +1098,12 @@ mod convert {
|
||||
timestamp: from_file.timestamp(),
|
||||
size: from_file.size(),
|
||||
md5: *from_file.md5(),
|
||||
source: match from_file.source() {
|
||||
Some(source) => {
|
||||
Some(LineString::from(source, dwarf, line_strings, strings)?)
|
||||
}
|
||||
None => None,
|
||||
},
|
||||
});
|
||||
files.push(program.add_file(from_name, from_dir, from_info));
|
||||
}
|
||||
@@ -1074,13 +1126,14 @@ mod convert {
|
||||
Some(val) => address = Some(val),
|
||||
None => return Err(ConvertError::InvalidAddress),
|
||||
}
|
||||
from_row.execute(read::LineInstruction::SetAddress(0), &mut from_program);
|
||||
from_row
|
||||
.execute(read::LineInstruction::SetAddress(0), &mut from_program)?;
|
||||
}
|
||||
read::LineInstruction::DefineFile(_) => {
|
||||
return Err(ConvertError::UnsupportedLineInstruction);
|
||||
}
|
||||
_ => {
|
||||
if from_row.execute(instruction, &mut from_program) {
|
||||
if from_row.execute(instruction, &mut from_program)? {
|
||||
if !program.in_sequence() {
|
||||
program.begin_sequence(address);
|
||||
address = None;
|
||||
@@ -1190,6 +1243,13 @@ mod tests {
|
||||
program.file_has_md5 = true;
|
||||
}
|
||||
|
||||
// Note: Embedded source code is an accepted extension
|
||||
// that will become part of DWARF v6. We're using the LLVM extension
|
||||
// here for v5.
|
||||
if encoding.version >= 5 {
|
||||
program.file_has_source = true;
|
||||
}
|
||||
|
||||
let dir_id = program.add_directory(dir2.clone());
|
||||
assert_eq!(&dir2, program.get_directory(dir_id));
|
||||
assert_eq!(dir_id, program.add_directory(dir2.clone()));
|
||||
@@ -1202,8 +1262,11 @@ mod tests {
|
||||
} else {
|
||||
[0; 16]
|
||||
},
|
||||
source: (encoding.version >= 5)
|
||||
.then(|| LineString::String(b"the source code\n".to_vec())),
|
||||
};
|
||||
let file_id = program.add_file(file2.clone(), dir_id, Some(file_info));
|
||||
let file_id =
|
||||
program.add_file(file2.clone(), dir_id, Some(file_info.clone()));
|
||||
assert_eq!((&file2, dir_id), program.get_file(file_id));
|
||||
assert_eq!(file_info, *program.get_file_info(file_id));
|
||||
|
||||
@@ -1213,7 +1276,7 @@ mod tests {
|
||||
assert_ne!(file_info, *program.get_file_info(file_id));
|
||||
assert_eq!(
|
||||
file_id,
|
||||
program.add_file(file2.clone(), dir_id, Some(file_info))
|
||||
program.add_file(file2.clone(), dir_id, Some(file_info.clone()))
|
||||
);
|
||||
assert_eq!(file_info, *program.get_file_info(file_id));
|
||||
|
||||
|
||||
2
third_party/rust/gimli/src/write/unit.rs
vendored
@@ -1,6 +1,6 @@
|
||||
use alloc::vec::Vec;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::{slice, usize};
|
||||
use std::slice;
|
||||
|
||||
use crate::common::{
|
||||
DebugAbbrevOffset, DebugInfoOffset, DebugLineOffset, DebugMacinfoOffset, DebugMacroOffset,
|
||||
|
||||
1
third_party/rust/gimli/src/write/writer.rs
vendored
@@ -329,7 +329,6 @@ mod tests {
|
||||
use super::*;
|
||||
use crate::write;
|
||||
use crate::{BigEndian, LittleEndian};
|
||||
use std::{i64, u64};
|
||||
|
||||
#[test]
|
||||
fn test_writer() {
|
||||
|
||||
2
third_party/rust/goblin/.cargo-checksum.json
vendored
@@ -1 +1 @@
|
||||
{"files":{"CHANGELOG.md":"50453c2109df5663c2e96b8f1f23525249c5fc2f59c8f74ec5c2b3646ac21144","Cargo.lock":"b9dbea5eb49f6f64c350b16cc7c13dc16086e6888b7a622794393400137a4280","Cargo.toml":"f57048482755f5bddc3ed3a06b469ebec4923b647df060ae53f3a349932ddf76","LICENSE":"655e3ee7a4c27430774962e62a6d37d7348e5f2f292010ad674ce1bebefd24bc","README.md":"d108b8d55ecd89934af0d94c6c86d4e39ed2daefa8d2ad5a229cae61dfe6f531","src/archive/mod.rs":"ae739638d7267011bedf51712516d3485171d8f2df2ab6746a0d942d86efd6a6","src/elf/compression_header.rs":"2eb5fdda9177c1c897310d86714967de019b39c6e23b1f3a890dd3a659be0acc","src/elf/constants_header.rs":"f2ede290ecacf60b1719e9994aa45612bf0f7baf63806a293d4530a674e5861a","src/elf/constants_relocation.rs":"2837231dd3e2341008842e031b81cbb9999214a2f9e6738c6374a5464d62555d","src/elf/dynamic.rs":"907146d1968f656fc9cc3621037c193877e30675ccd8ec6eb2e3adbc1e2afd27","src/elf/gnu_hash.rs":"4592b5516d807a61a9dccc3f97266587c032eea621708bd78e23c70be6128228","src/elf/header.rs":"3391a1fa9b8e3923f7ce74caff0668d8ddb5b34767bf3da309ff497fd81c34c7","src/elf/mod.rs":"2ee0faa0917deb5e90ca60e9c852434745a4c7f553e609e9603a57b7d55b739f","src/elf/note.rs":"bf5e45e2697f7700d5adbb52f890ea4c63b70b7077ca0e7c751420bb92923529","src/elf/program_header.rs":"4c322eb124c4e2bdeec4915067d2bb11fe9e7fba1811dc351a3f7581df121da0","src/elf/reloc.rs":"e952fc4f79ac6a08f218a4758321ab94f172c376dc5235a82f70732682cca82f","src/elf/section_header.rs":"72eb788e8807f16a97683d20add21d5c3feaae06813509e2a87b76a7cd0c376f","src/elf/sym.rs":"267996f926f337b88058908af260be30473afbe1fe6d72cdeb8dd0ed474671d8","src/elf/symver.rs":"3f899201f64a702653d44288f860003e7acd75e38111d36479af823ed92b1341","src/error.rs":"a1bb56d82db52ac627e55b163f489f06a78c939a8ccfdec210b4f726d6ed6e9d","src/lib.rs":"f29832bdf7d7f7d9e34f65704afea2710d578df60cc171dd179b5ce889faaf12","src/mach/bind_opcodes.rs":"1dcacfb853d05c2c7e6dbb4509ee705a8ea645db0d334991a2293fef92eee851","src/mach/constants.rs":"c2a2381a0b9c3047d37582465e8965d995dca414d0da21fb7bcc6b8334e49eb6","src/mach/exports.rs":"d22122744673a3ce5f54b2b4b20bfa47d17378e64d3dda2858dd13add74ed3dc","src/mach/fat.rs":"45a3228aaa1ab8b77f322dd4924b7383f1357e226ffc079846d67c0268389ea7","src/mach/header.rs":"006619188f51fa43051dc04aa4b2ecd5f89136cf05cb6a7b23a228228008e6ae","src/mach/imports.rs":"2153269dfff32e23d72f76a82d658be06bd79b7e35d79b7e17115e4eb24b13d5","src/mach/load_command.rs":"42e6f0973092185db233230e71e9312bbac7c2e1090bb6d713804020319dfa33","src/mach/mod.rs":"f1e120b7aabe370fa2af43e359f97ffa3e187fdb5743ef19c37402264e92b326","src/mach/relocation.rs":"11b0b76ed7d997c87e396100515f931fe84473c228bed0e980fbab311530070a","src/mach/segment.rs":"947acd8a724b41d0afbbd9e2727f41be51f1be439f47417258e829db1a4765e6","src/mach/symbols.rs":"d2505fa8d65ea267abfcb6a9fc4d1acd47d5605aa6775935757e2fa8e92af507","src/pe/authenticode.rs":"36b5b3ddc9806f679cf21418bc13af4b277eba87096304dfb50946bc0f941206","src/pe/certificate_table.rs":"f6c31ba518d9fc4b6e12d2f24d6c9d58b21b341a1f189cbcf2aae0ae51304ad3","src/pe/characteristic.rs":"2ffa012ec225f3c8570689713969a7dc34a92eaf4f944a27881fd0c248cc8b20","src/pe/data_directories.rs":"d0352ccc03e0ab2935235e91b391cc55828406087f026f90ec11ca5906fd8c8c","src/pe/debug.rs":"3811c616a9b6d6b54e15348bb369b794bb89532e04fe19eca91b745d7c51a553","src/pe/dll_characteristic.rs":"d63e86ecb38ccdd81493268be34535391b794651d619e8d4ccc1a56aa10e1679","src/pe/exception.rs":"3935900334692a6f54f7176eca044688289834bcde1b579b88d6ed1af3c3c005","src/pe/export.rs":"c98f5ce0b1b18bb87f06d1d41dbf70f443d65ecb1624cb23a1ef6c5f93a892e1
","src/pe/header.rs":"dea84fd7101aff56c7cb6ff08a1efdde5cf43a04328397b6177d36ab1c2a3774","src/pe/import.rs":"855276e46c01ccd7631104e4d1265592e36c9468aadcacc937a40c29d94aabe3","src/pe/mod.rs":"1d8a7cb3ddb8443e34939adda4794308a31737bc29f66668d36dfa22950ba69f","src/pe/optional_header.rs":"f2411a0f272e22c280a1fe3c15919b07d1f152448b47db31acaacad8a0a9a153","src/pe/options.rs":"457877197f768c331437297d787dc718b1053b813e3a1dd9b968133fb1540d44","src/pe/relocation.rs":"c479b80bb1d6910f2168505dda4f2d8925b7edc34bed4e25d069546f88f52bb3","src/pe/section_table.rs":"e4b1a2f78c2336aaa0355b5ef102dbe29138c4fa1ba29ed3f379aad1fc64bdff","src/pe/subsystem.rs":"162a851e217b617aa8afa1b83e37ea9c5a793f76a17be57b56b550d7cabb7b8a","src/pe/symbol.rs":"1a5fb5bec5727752a6506682ed2ab57829ea810f21f951932a0107861ec0e092","src/pe/utils.rs":"e6da9979ba5f2ae7d1274eef8230cdc4dd90c90a79c7bb9438f8b8ff0aef74be","src/strtab.rs":"110c774b2998514b4d0be1d575b3e2a8eb85f801b6f782e4ed3a8f7521920689","tests/bins/elf/gnu_hash/README.md":"52581e2ea7067a55bd8aedf4079200fb76448573ae9ffef7d886b9556e980db9"},"package":"1b363a30c165f666402fe6a3024d3bec7ebc898f96a4a23bd1c99f8dbf3f4f47"}
|
||||
{"files":{"CHANGELOG.md":"052930023a92d0dbf45d189df2ecf26e5469ef5c19d7140ea1290c44f9fa9c35","Cargo.lock":"d5985b8c733c8687aa69004e66d67e15db06f6b4eb8615df053fe404f8b7e017","Cargo.toml":"917e3955b6516af682981eb51bb7d665155a804674cb9f8871bfff9d9a745923","LICENSE":"655e3ee7a4c27430774962e62a6d37d7348e5f2f292010ad674ce1bebefd24bc","README.md":"3cdbab44cacba8d4d39443affba422adf22d7f00d9c68bac80c7d25b94520dc5","src/archive/mod.rs":"ae739638d7267011bedf51712516d3485171d8f2df2ab6746a0d942d86efd6a6","src/elf/compression_header.rs":"2eb5fdda9177c1c897310d86714967de019b39c6e23b1f3a890dd3a659be0acc","src/elf/constants_header.rs":"f2ede290ecacf60b1719e9994aa45612bf0f7baf63806a293d4530a674e5861a","src/elf/constants_relocation.rs":"2837231dd3e2341008842e031b81cbb9999214a2f9e6738c6374a5464d62555d","src/elf/dynamic.rs":"907146d1968f656fc9cc3621037c193877e30675ccd8ec6eb2e3adbc1e2afd27","src/elf/gnu_hash.rs":"4592b5516d807a61a9dccc3f97266587c032eea621708bd78e23c70be6128228","src/elf/header.rs":"3391a1fa9b8e3923f7ce74caff0668d8ddb5b34767bf3da309ff497fd81c34c7","src/elf/mod.rs":"2ee0faa0917deb5e90ca60e9c852434745a4c7f553e609e9603a57b7d55b739f","src/elf/note.rs":"bf5e45e2697f7700d5adbb52f890ea4c63b70b7077ca0e7c751420bb92923529","src/elf/program_header.rs":"4c322eb124c4e2bdeec4915067d2bb11fe9e7fba1811dc351a3f7581df121da0","src/elf/reloc.rs":"e952fc4f79ac6a08f218a4758321ab94f172c376dc5235a82f70732682cca82f","src/elf/section_header.rs":"72eb788e8807f16a97683d20add21d5c3feaae06813509e2a87b76a7cd0c376f","src/elf/sym.rs":"267996f926f337b88058908af260be30473afbe1fe6d72cdeb8dd0ed474671d8","src/elf/symver.rs":"3f899201f64a702653d44288f860003e7acd75e38111d36479af823ed92b1341","src/error.rs":"a1bb56d82db52ac627e55b163f489f06a78c939a8ccfdec210b4f726d6ed6e9d","src/lib.rs":"06771b56b262fa30396e4bacbf0a4996b6088d1cfa5defa20dedf69a2c58d3b3","src/mach/bind_opcodes.rs":"1dcacfb853d05c2c7e6dbb4509ee705a8ea645db0d334991a2293fef92eee851","src/mach/constants.rs":"c2a2381a0b9c3047d37582465e8965d995dca414d0da21fb7bcc6b8334e49eb6","src/mach/exports.rs":"d22122744673a3ce5f54b2b4b20bfa47d17378e64d3dda2858dd13add74ed3dc","src/mach/fat.rs":"45a3228aaa1ab8b77f322dd4924b7383f1357e226ffc079846d67c0268389ea7","src/mach/header.rs":"006619188f51fa43051dc04aa4b2ecd5f89136cf05cb6a7b23a228228008e6ae","src/mach/imports.rs":"2153269dfff32e23d72f76a82d658be06bd79b7e35d79b7e17115e4eb24b13d5","src/mach/load_command.rs":"42e6f0973092185db233230e71e9312bbac7c2e1090bb6d713804020319dfa33","src/mach/mod.rs":"f1e120b7aabe370fa2af43e359f97ffa3e187fdb5743ef19c37402264e92b326","src/mach/relocation.rs":"11b0b76ed7d997c87e396100515f931fe84473c228bed0e980fbab311530070a","src/mach/segment.rs":"947acd8a724b41d0afbbd9e2727f41be51f1be439f47417258e829db1a4765e6","src/mach/symbols.rs":"d2505fa8d65ea267abfcb6a9fc4d1acd47d5605aa6775935757e2fa8e92af507","src/pe/authenticode.rs":"36b5b3ddc9806f679cf21418bc13af4b277eba87096304dfb50946bc0f941206","src/pe/certificate_table.rs":"f6c31ba518d9fc4b6e12d2f24d6c9d58b21b341a1f189cbcf2aae0ae51304ad3","src/pe/characteristic.rs":"2ffa012ec225f3c8570689713969a7dc34a92eaf4f944a27881fd0c248cc8b20","src/pe/data_directories.rs":"d0352ccc03e0ab2935235e91b391cc55828406087f026f90ec11ca5906fd8c8c","src/pe/debug.rs":"485758ff505c070da2a26df9099b90fc421e679b05b520d7b13c95a63647d1a0","src/pe/dll_characteristic.rs":"d63e86ecb38ccdd81493268be34535391b794651d619e8d4ccc1a56aa10e1679","src/pe/exception.rs":"3935900334692a6f54f7176eca044688289834bcde1b579b88d6ed1af3c3c005","src/pe/export.rs":"c98f5ce0b1b18bb87f06d1d41dbf70f443d65ecb1624cb23a1ef6c5f93a892e1
","src/pe/header.rs":"9e765f03be5e2ee6d80add0fa4fa81f38e973d7bb646f8df31fdeda106e8aa1d","src/pe/import.rs":"855276e46c01ccd7631104e4d1265592e36c9468aadcacc937a40c29d94aabe3","src/pe/mod.rs":"21ea8aed0716df6e2f5c13658f40a05bfbf6ce2c467dfd2391f661896c79d54b","src/pe/optional_header.rs":"f2411a0f272e22c280a1fe3c15919b07d1f152448b47db31acaacad8a0a9a153","src/pe/options.rs":"457877197f768c331437297d787dc718b1053b813e3a1dd9b968133fb1540d44","src/pe/relocation.rs":"c479b80bb1d6910f2168505dda4f2d8925b7edc34bed4e25d069546f88f52bb3","src/pe/section_table.rs":"e4b1a2f78c2336aaa0355b5ef102dbe29138c4fa1ba29ed3f379aad1fc64bdff","src/pe/subsystem.rs":"162a851e217b617aa8afa1b83e37ea9c5a793f76a17be57b56b550d7cabb7b8a","src/pe/symbol.rs":"1a5fb5bec5727752a6506682ed2ab57829ea810f21f951932a0107861ec0e092","src/pe/tls.rs":"d674d46c870e090e90e6b709620abd3de990ae1f85a66253b81004cce03d1b6a","src/pe/utils.rs":"adf5b8bd79e90211e82cda8f01ee775b9cdfd20bfafdee36c54000daea8592c0","src/strtab.rs":"110c774b2998514b4d0be1d575b3e2a8eb85f801b6f782e4ed3a8f7521920689","tests/bins/elf/gnu_hash/README.md":"52581e2ea7067a55bd8aedf4079200fb76448573ae9ffef7d886b9556e980db9","tests/bins/te/README.md":"a0daf347449bcf82c38d981b2a700d9fd4657c3a7e7dbfa22f90e74750c6bc0d"},"package":"53ab3f32d1d77146981dea5d6b1e8fe31eedcb7013e5e00d6ccd1259a4b4d923"}
|
||||
25
third_party/rust/goblin/CHANGELOG.md
vendored
25
third_party/rust/goblin/CHANGELOG.md
vendored
@@ -3,9 +3,30 @@ All notable changes to this project will be documented in this file.
|
||||
|
||||
Before 1.0, this project does not adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
|
||||
|
||||
Goblin is now 0.8, which means we will try our best to ease breaking changes. Tracking issue is here: https://github.com/m4b/goblin/issues/97
|
||||
Goblin is now 0.9, which means we will try our best to ease breaking changes. Tracking issue is here: https://github.com/m4b/goblin/issues/97
|
||||
|
||||
## [0.8.1] - 2024-04-27
|
||||
## [0.9.2] - 2024-10-26
|
||||
### Fixed
|
||||
pe: fix PE with zero `raw_data_size` of section, thanks @ideeockus: https://github.com/m4b/goblin/pull/396
|
||||
### Added
|
||||
pe: allow parsing pe::Header without dos stubs, `Header::parse_without_dos`, thanks @ideeockus: https://github.com/m4b/goblin/pull/396
|
||||
|
||||
## [0.9.1] - 2024-10-24
|
||||
### (hot) Fix
|
||||
pe: fix parsing of tls in certain cases (issue: https://github.com/m4b/goblin/issues/424), thanks @kkent030315: https://github.com/m4b/goblin/pull/425
|
||||
|
||||
## [0.9.0] - 2024-10-20
|
||||
### Added, Breaking
|
||||
pe: add TE (terse executable) support, big thanks @Javagedes: https://github.com/m4b/goblin/pull/397
|
||||
pe: add support for codeview PDB 2.0, thanks @joschock: https://github.com/m4b/goblin/pull/409
|
||||
pe: parse TLS in data directories, thanks @kkent030315: https://github.com/m4b/goblin/pull/404
|
||||
|
||||
## [0.8.2] - 2024-04-29
|
||||
Everything in 0.8.1 except TE support in https://github.com/m4b/goblin/pull/397 was reverted,
|
||||
due to it being technically a breaking change.
|
||||
0.8.1 was yanked from crates.
|
||||
|
||||
## [0.8.1] - 2024-04-27 (YANKED)
|
||||
### Docs
|
||||
pe: document pe header, thanks @JohnScience: https://github.com/m4b/goblin/pull/399
|
||||
pe, elf: fix doc warnings, thanks @5225225: https://github.com/m4b/goblin/pull/395
|
||||
|
||||
2
third_party/rust/goblin/Cargo.lock
generated
vendored
2
third_party/rust/goblin/Cargo.lock
generated
vendored
@@ -74,7 +74,7 @@ checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
|
||||
|
||||
[[package]]
|
||||
name = "goblin"
|
||||
version = "0.8.2"
|
||||
version = "0.9.2"
|
||||
dependencies = [
|
||||
"log",
|
||||
"plain",
|
||||
|
||||
18
third_party/rust/goblin/Cargo.toml
vendored
18
third_party/rust/goblin/Cargo.toml
vendored
@@ -13,7 +13,7 @@
|
||||
edition = "2021"
|
||||
rust-version = "1.63.0"
|
||||
name = "goblin"
|
||||
version = "0.8.2"
|
||||
version = "0.9.2"
|
||||
authors = [
|
||||
"m4b <m4b.github.io@gmail.com>",
|
||||
"seu <seu@panopticon.re>",
|
||||
@@ -21,6 +21,7 @@ authors = [
|
||||
"Philip Craig <philipjcraig@gmail.com>",
|
||||
"Lzu Tao <taolzu@gmail.com>",
|
||||
]
|
||||
build = false
|
||||
include = [
|
||||
"src",
|
||||
"CHANGELOG.md",
|
||||
@@ -28,6 +29,10 @@ include = [
|
||||
"LICENSE",
|
||||
"README.md",
|
||||
]
|
||||
autobins = false
|
||||
autoexamples = false
|
||||
autotests = false
|
||||
autobenches = false
|
||||
description = "An impish, cross-platform, ELF, Mach-o, and PE binary parsing and loading crate"
|
||||
documentation = "https://docs.rs/goblin"
|
||||
readme = "README.md"
|
||||
@@ -45,6 +50,10 @@ categories = [
|
||||
license = "MIT"
|
||||
repository = "https://github.com/m4b/goblin"
|
||||
|
||||
[lib]
|
||||
name = "goblin"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies.log]
|
||||
version = "0.4"
|
||||
optional = true
|
||||
@@ -55,7 +64,7 @@ version = "0.2.3"
|
||||
|
||||
[dependencies.scroll]
|
||||
version = "0.12"
|
||||
default_features = false
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies.stderrlog]
|
||||
version = "0.5.4"
|
||||
@@ -74,6 +83,7 @@ default = [
|
||||
"mach64",
|
||||
"pe32",
|
||||
"pe64",
|
||||
"te",
|
||||
"archive",
|
||||
"endian_fd",
|
||||
]
|
||||
@@ -102,6 +112,10 @@ std = [
|
||||
"alloc",
|
||||
"scroll/std",
|
||||
]
|
||||
te = [
|
||||
"alloc",
|
||||
"endian_fd",
|
||||
]
|
||||
|
||||
[badges.travis-ci]
|
||||
branch = "master"
|
||||
|
||||
7
third_party/rust/goblin/README.md
vendored
7
third_party/rust/goblin/README.md
vendored
@@ -26,7 +26,7 @@ Add to your `Cargo.toml`
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
goblin = "0.8"
|
||||
goblin = "0.9"
|
||||
```
|
||||
|
||||
### Features
|
||||
@@ -97,6 +97,7 @@ Here are some things you could do with this crate (or help to implement so they
|
||||
* mach32 - 32-bit mach-o `repr(C)` struct defs
|
||||
* pe32 - 32-bit PE `repr(C)` struct defs
|
||||
* pe64 - 64-bit PE `repr(C)` struct defs
|
||||
+ te - Terse Executable (TE) `repr(C)` struct defs
|
||||
* archive - a Unix Archive parser
|
||||
* endian_fd - parses according to the endianness in the binary
|
||||
* std - to allow `no_std` environments
|
||||
@@ -124,6 +125,7 @@ In lexicographic order:
|
||||
- [@glandium]
|
||||
- [@h33p]
|
||||
- [@ibabushkin]
|
||||
- [@ideeockus]
|
||||
- [@jackcmay]
|
||||
- [@jan-auer]
|
||||
- [@Javagedes]
|
||||
@@ -132,6 +134,7 @@ In lexicographic order:
|
||||
- [@Jhynjhiruu]
|
||||
- [@johannst]
|
||||
- [@JohnScience]
|
||||
- [@joschock]
|
||||
- [@jrmuizel]
|
||||
- [@jsgf]
|
||||
- [@keith]
|
||||
@@ -207,6 +210,7 @@ In lexicographic order:
|
||||
[@glandium]: https://github.com/glandium
|
||||
[@h33p]: https://github.com/h33p
|
||||
[@ibabushkin]: https://github.com/ibabushkin
|
||||
[@ideeockus]: https://github.com/ideeockus
|
||||
[@jackcmay]: https://github.com/jackcmay
|
||||
[@jan-auer]: https://github.com/jan-auer
|
||||
[@Javagedes]: https://github.com/Javagedes
|
||||
@@ -214,6 +218,7 @@ In lexicographic order:
|
||||
[@Jhynjhiruu]: https://github.com/Jhynjhiruu
|
||||
[@JohnScience]: https://github.com/JohnScience
|
||||
[@johannst]: https://github.com/johannst
|
||||
[@joschock]: https://github.com/joschock
|
||||
[@jdub]: https://github.com/jdub
|
||||
[@jrmuizel]: https://github.com/jrmuizel
|
||||
[@jsgf]: https://github.com/jsgf
|
||||
|
||||
7
third_party/rust/goblin/src/lib.rs
vendored
7
third_party/rust/goblin/src/lib.rs
vendored
@@ -229,6 +229,7 @@ pub enum Hint {
|
||||
Mach(HintData),
|
||||
MachFat(usize),
|
||||
PE,
|
||||
TE,
|
||||
COFF,
|
||||
Archive,
|
||||
Unknown(u64),
|
||||
@@ -236,7 +237,7 @@ pub enum Hint {
|
||||
|
||||
macro_rules! if_everything {
|
||||
($($i:item)*) => ($(
|
||||
#[cfg(all(feature = "endian_fd", feature = "elf64", feature = "elf32", feature = "pe64", feature = "pe32", feature = "mach64", feature = "mach32", feature = "archive"))]
|
||||
#[cfg(all(feature = "endian_fd", feature = "elf64", feature = "elf32", feature = "pe64", feature = "pe32", feature = "te", feature = "mach64", feature = "mach32", feature = "archive"))]
|
||||
$i
|
||||
)*)
|
||||
}
|
||||
@@ -262,6 +263,7 @@ if_everything! {
|
||||
} else {
|
||||
match *&bytes[0..2].pread_with::<u16>(0, LE)? {
|
||||
pe::header::DOS_MAGIC => Ok(Hint::PE),
|
||||
pe::header::TE_MAGIC => Ok(Hint::TE),
|
||||
pe::header::COFF_MACHINE_X86 |
|
||||
pe::header::COFF_MACHINE_X86_64 |
|
||||
pe::header::COFF_MACHINE_ARM64 => Ok(Hint::COFF),
|
||||
@@ -290,6 +292,8 @@ if_everything! {
|
||||
Elf(elf::Elf<'a>),
|
||||
/// A PE32/PE32+!
|
||||
PE(pe::PE<'a>),
|
||||
/// A TE!
|
||||
TE(pe::TE<'a>),
|
||||
/// A COFF
|
||||
COFF(pe::Coff<'a>),
|
||||
/// A 32/64-bit Mach-o binary _OR_ it is a multi-architecture binary container!
|
||||
@@ -309,6 +313,7 @@ if_everything! {
|
||||
Hint::Mach(_) | Hint::MachFat(_) => Ok(Object::Mach(mach::Mach::parse(bytes)?)),
|
||||
Hint::Archive => Ok(Object::Archive(archive::Archive::parse(bytes)?)),
|
||||
Hint::PE => Ok(Object::PE(pe::PE::parse(bytes)?)),
|
||||
Hint::TE => Ok(Object::TE(pe::TE::parse(bytes)?)),
|
||||
Hint::COFF => Ok(Object::COFF(pe::Coff::parse(bytes)?)),
|
||||
Hint::Unknown(magic) => Ok(Object::Unknown(magic)),
|
||||
}
|
||||
|
||||
77
third_party/rust/goblin/src/pe/debug.rs
vendored
77
third_party/rust/goblin/src/pe/debug.rs
vendored
@@ -10,6 +10,7 @@ use crate::pe::utils;
|
||||
pub struct DebugData<'a> {
|
||||
pub image_debug_directory: ImageDebugDirectory,
|
||||
pub codeview_pdb70_debug_info: Option<CodeviewPDB70DebugInfo<'a>>,
|
||||
pub codeview_pdb20_debug_info: Option<CodeviewPDB20DebugInfo<'a>>,
|
||||
}
|
||||
|
||||
impl<'a> DebugData<'a> {
|
||||
@@ -39,10 +40,13 @@ impl<'a> DebugData<'a> {
|
||||
ImageDebugDirectory::parse_with_opts(bytes, dd, sections, file_alignment, opts)?;
|
||||
let codeview_pdb70_debug_info =
|
||||
CodeviewPDB70DebugInfo::parse_with_opts(bytes, &image_debug_directory, opts)?;
|
||||
let codeview_pdb20_debug_info =
|
||||
CodeviewPDB20DebugInfo::parse_with_opts(bytes, &image_debug_directory, opts)?;
|
||||
|
||||
Ok(DebugData {
|
||||
image_debug_directory,
|
||||
codeview_pdb70_debug_info,
|
||||
codeview_pdb20_debug_info,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -92,7 +96,7 @@ impl ImageDebugDirectory {
|
||||
)
|
||||
}
|
||||
|
||||
fn parse_with_opts(
|
||||
pub(crate) fn parse_with_opts(
|
||||
bytes: &[u8],
|
||||
dd: data_directories::DataDirectory,
|
||||
sections: &[section_table::SectionTable],
|
||||
@@ -184,3 +188,74 @@ impl<'a> CodeviewPDB70DebugInfo<'a> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// http://llvm.org/doxygen/CVDebugRecord_8h_source.html
|
||||
#[repr(C)]
|
||||
#[derive(Debug, PartialEq, Copy, Clone, Default)]
|
||||
pub struct CodeviewPDB20DebugInfo<'a> {
|
||||
pub codeview_signature: u32,
|
||||
pub codeview_offset: u32,
|
||||
pub signature: u32,
|
||||
pub age: u32,
|
||||
pub filename: &'a [u8],
|
||||
}
|
||||
|
||||
impl<'a> CodeviewPDB20DebugInfo<'a> {
|
||||
pub fn parse(bytes: &'a [u8], idd: &ImageDebugDirectory) -> error::Result<Option<Self>> {
|
||||
Self::parse_with_opts(bytes, idd, &options::ParseOptions::default())
|
||||
}
|
||||
|
||||
pub fn parse_with_opts(
|
||||
bytes: &'a [u8],
|
||||
idd: &ImageDebugDirectory,
|
||||
opts: &options::ParseOptions,
|
||||
) -> error::Result<Option<Self>> {
|
||||
if idd.data_type != IMAGE_DEBUG_TYPE_CODEVIEW {
|
||||
// not a codeview debug directory
|
||||
// that's not an error, but it's not a CodeviewPDB20DebugInfo either
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// ImageDebugDirectory.pointer_to_raw_data stores a raw offset -- not a virtual offset -- which we can use directly
|
||||
let mut offset: usize = match opts.resolve_rva {
|
||||
true => idd.pointer_to_raw_data as usize,
|
||||
false => idd.address_of_raw_data as usize,
|
||||
};
|
||||
|
||||
// calculate how long the eventual filename will be, which doubles as a check of the record size
|
||||
let filename_length = idd.size_of_data as isize - 16;
|
||||
if filename_length < 0 {
|
||||
// the record is too short to be plausible
|
||||
return Err(error::Error::Malformed(format!(
|
||||
"ImageDebugDirectory size of data seems wrong: {:?}",
|
||||
idd.size_of_data
|
||||
)));
|
||||
}
|
||||
let filename_length = filename_length as usize;
|
||||
|
||||
// check the codeview signature
|
||||
let codeview_signature: u32 = bytes.gread_with(&mut offset, scroll::LE)?;
|
||||
if codeview_signature != CODEVIEW_PDB20_MAGIC {
|
||||
return Ok(None);
|
||||
}
|
||||
let codeview_offset: u32 = bytes.gread_with(&mut offset, scroll::LE)?;
|
||||
|
||||
// read the rest
|
||||
let signature: u32 = bytes.gread_with(&mut offset, scroll::LE)?;
|
||||
let age: u32 = bytes.gread_with(&mut offset, scroll::LE)?;
|
||||
if let Some(filename) = bytes.get(offset..offset + filename_length) {
|
||||
Ok(Some(CodeviewPDB20DebugInfo {
|
||||
codeview_signature,
|
||||
codeview_offset,
|
||||
signature,
|
||||
age,
|
||||
filename,
|
||||
}))
|
||||
} else {
|
||||
Err(error::Error::Malformed(format!(
|
||||
"ImageDebugDirectory seems corrupted: {:?}",
|
||||
idd
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
172
third_party/rust/goblin/src/pe/header.rs
vendored
172
third_party/rust/goblin/src/pe/header.rs
vendored
@@ -1,5 +1,5 @@
|
||||
use crate::error;
|
||||
use crate::pe::{optional_header, section_table, symbol};
|
||||
use crate::pe::{data_directories, optional_header, section_table, symbol};
|
||||
use crate::strtab;
|
||||
use alloc::vec::Vec;
|
||||
use log::debug;
|
||||
@@ -791,14 +791,7 @@ pub struct Header {
|
||||
}
|
||||
|
||||
impl Header {
|
||||
pub fn parse(bytes: &[u8]) -> error::Result<Self> {
|
||||
let dos_header = DosHeader::parse(&bytes)?;
|
||||
let dos_stub = bytes.pread(DOS_STUB_OFFSET as usize).map_err(|_| {
|
||||
error::Error::Malformed(format!(
|
||||
"cannot parse DOS stub (offset {:#x})",
|
||||
DOS_STUB_OFFSET
|
||||
))
|
||||
})?;
|
||||
fn parse_impl(bytes: &[u8], dos_header: DosHeader, dos_stub: DosStub) -> error::Result<Self> {
|
||||
let mut offset = dos_header.pe_pointer as usize;
|
||||
let signature = bytes.gread_with(&mut offset, scroll::LE).map_err(|_| {
|
||||
error::Error::Malformed(format!("cannot parse PE signature (offset {:#x})", offset))
|
||||
@@ -809,6 +802,7 @@ impl Header {
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Ok(Header {
|
||||
dos_header,
|
||||
dos_stub,
|
||||
@@ -817,6 +811,25 @@ impl Header {
|
||||
optional_header,
|
||||
})
|
||||
}
|
||||
|
||||
/// Parses PE header from the given bytes; this will fail if the DosHeader or DosStub is malformed or missing in some way
|
||||
pub fn parse(bytes: &[u8]) -> error::Result<Self> {
|
||||
let dos_header = DosHeader::parse(&bytes)?;
|
||||
let dos_stub = bytes.pread(DOS_STUB_OFFSET as usize).map_err(|_| {
|
||||
error::Error::Malformed(format!(
|
||||
"cannot parse DOS stub (offset {:#x})",
|
||||
DOS_STUB_OFFSET
|
||||
))
|
||||
})?;
|
||||
|
||||
Header::parse_impl(bytes, dos_header, dos_stub)
|
||||
}
|
||||
|
||||
/// Parses PE header from the given bytes, a default DosHeader and DosStub are generated, and any malformed header or stub is ignored
|
||||
pub fn parse_without_dos(bytes: &[u8]) -> error::Result<Self> {
|
||||
let dos_header = DosHeader::default();
|
||||
Header::parse_impl(bytes, dos_header, DosStub::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl ctx::TryIntoCtx<scroll::Endian> for Header {
|
||||
@@ -835,6 +848,147 @@ impl ctx::TryIntoCtx<scroll::Endian> for Header {
|
||||
}
|
||||
}
|
||||
|
||||
/// The TE header is a reduced PE32/PE32+ header containing only fields
|
||||
/// required for execution in the Platform Initialization
|
||||
/// ([PI](https://uefi.org/specs/PI/1.8/V1_Introduction.html)) architecture.
|
||||
/// The TE header is described in this specification:
|
||||
/// <https://uefi.org/specs/PI/1.8/V1_TE_Image.html#te-header>
|
||||
#[cfg(feature = "te")]
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Default, PartialEq, Copy, Clone, Pread, Pwrite)]
|
||||
pub struct TeHeader {
|
||||
/// Te signature, always [TE_MAGIC]
|
||||
pub signature: u16,
|
||||
/// The machine type
|
||||
pub machine: u16,
|
||||
/// The number of sections
|
||||
pub number_of_sections: u8,
|
||||
/// The subsystem
|
||||
pub subsystem: u8,
|
||||
/// the amount of bytes stripped from the header when converting from a
|
||||
/// PE32/PE32+ header to a TE header. Used to resolve addresses
|
||||
pub stripped_size: u16,
|
||||
/// The entry point of the binary
|
||||
pub entry_point: u32,
|
||||
/// The base of the code section
|
||||
pub base_of_code: u32,
|
||||
/// The image base
|
||||
pub image_base: u64,
|
||||
/// The size and address of the relocation directory
|
||||
pub reloc_dir: data_directories::DataDirectory,
|
||||
/// The size and address of the debug directory
|
||||
pub debug_dir: data_directories::DataDirectory,
|
||||
}
|
||||
|
||||
#[cfg(feature = "te")]
|
||||
#[doc(alias("IMAGE_TE_SIGNATURE"))]
|
||||
pub const TE_MAGIC: u16 = 0x5a56;
|
||||
|
||||
#[cfg(feature = "te")]
|
||||
impl TeHeader {
|
||||
/// Parse the TE header from the given bytes.
|
||||
pub fn parse(bytes: &[u8], offset: &mut usize) -> error::Result<Self> {
|
||||
let mut header: TeHeader = bytes.gread_with(offset, scroll::LE)?;
|
||||
let adj_offset = header.stripped_size as u32 - core::mem::size_of::<TeHeader>() as u32;
|
||||
header.fixup_header(adj_offset);
|
||||
Ok(header)
|
||||
}
|
||||
|
||||
/// Parse the sections from the TE header.
|
||||
pub fn sections(
|
||||
&self,
|
||||
bytes: &[u8],
|
||||
offset: &mut usize,
|
||||
) -> error::Result<Vec<section_table::SectionTable>> {
|
||||
let adj_offset = self.stripped_size as u32 - core::mem::size_of::<TeHeader>() as u32;
|
||||
let nsections = self.number_of_sections as usize;
|
||||
|
||||
// a section table is at least 40 bytes
|
||||
if nsections > bytes.len() / 40 {
|
||||
return Err(error::Error::BufferTooShort(nsections, "sections"));
|
||||
}
|
||||
|
||||
let mut sections = Vec::with_capacity(nsections);
|
||||
for i in 0..nsections {
|
||||
let mut section = section_table::SectionTable::parse(bytes, offset, 0)?;
|
||||
TeHeader::fixup_section(&mut section, adj_offset);
|
||||
debug!("({}) {:#?}", i, section);
|
||||
sections.push(section);
|
||||
}
|
||||
Ok(sections)
|
||||
}
|
||||
|
||||
// Adjust addresses in the header to account for the stripped size
|
||||
fn fixup_header(&mut self, adj_offset: u32) {
|
||||
debug!(
|
||||
"Entry point fixed up from: 0x{:x} to 0x{:X}",
|
||||
self.entry_point,
|
||||
self.entry_point.wrapping_sub(adj_offset)
|
||||
);
|
||||
self.entry_point = self.entry_point.wrapping_sub(adj_offset);
|
||||
|
||||
debug!(
|
||||
"Base of code fixed up from: 0x{:x} to 0x{:X}",
|
||||
self.base_of_code,
|
||||
self.base_of_code.wrapping_sub(adj_offset)
|
||||
);
|
||||
self.base_of_code = self.base_of_code.wrapping_sub(adj_offset);
|
||||
|
||||
debug!(
|
||||
"Relocation Directory fixed up from: 0x{:x} to 0x{:X}",
|
||||
self.reloc_dir.virtual_address,
|
||||
self.reloc_dir.virtual_address.wrapping_sub(adj_offset)
|
||||
);
|
||||
self.reloc_dir.virtual_address = self.reloc_dir.virtual_address.wrapping_sub(adj_offset);
|
||||
|
||||
debug!(
|
||||
"Debug Directory fixed up from: 0x{:x} to 0x{:X}",
|
||||
self.debug_dir.virtual_address,
|
||||
self.debug_dir.virtual_address.wrapping_sub(adj_offset)
|
||||
);
|
||||
self.debug_dir.virtual_address = self.debug_dir.virtual_address.wrapping_sub(adj_offset);
|
||||
}
|
||||
|
||||
// Adjust addresses in the section to account for the stripped size
|
||||
fn fixup_section(section: &mut section_table::SectionTable, adj_offset: u32) {
|
||||
debug!(
|
||||
"Section virtual address fixed up from: 0x{:X} to 0x{:X}",
|
||||
section.virtual_address,
|
||||
section.virtual_address.wrapping_sub(adj_offset)
|
||||
);
|
||||
section.virtual_address = section.virtual_address.wrapping_sub(adj_offset);
|
||||
|
||||
if section.pointer_to_linenumbers > 0 {
|
||||
debug!(
|
||||
"Section pointer to line numbers fixed up from: 0x{:X} to 0x{:X}",
|
||||
section.pointer_to_linenumbers,
|
||||
section.pointer_to_linenumbers.wrapping_sub(adj_offset)
|
||||
);
|
||||
section.pointer_to_linenumbers =
|
||||
section.pointer_to_linenumbers.wrapping_sub(adj_offset);
|
||||
}
|
||||
|
||||
if section.pointer_to_raw_data > 0 {
|
||||
debug!(
|
||||
"Section pointer to raw data fixed up from: 0x{:X} to 0x{:X}",
|
||||
section.pointer_to_raw_data,
|
||||
section.pointer_to_raw_data.wrapping_sub(adj_offset)
|
||||
);
|
||||
section.pointer_to_raw_data = section.pointer_to_raw_data.wrapping_sub(adj_offset);
|
||||
}
|
||||
|
||||
if section.pointer_to_relocations > 0 {
|
||||
debug!(
|
||||
"Section pointer to relocations fixed up from: 0x{:X} to 0x{:X}",
|
||||
section.pointer_to_relocations,
|
||||
section.pointer_to_relocations.wrapping_sub(adj_offset)
|
||||
);
|
||||
section.pointer_to_relocations =
|
||||
section.pointer_to_relocations.wrapping_sub(adj_offset);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert machine to str representation. Any case of "COFF_UNKNOWN"
|
||||
/// should be expected to change to a more specific value.
|
||||
pub fn machine_to_str(machine: u16) -> &'static str {
|
||||
|
||||
120
third_party/rust/goblin/src/pe/mod.rs
vendored
120
third_party/rust/goblin/src/pe/mod.rs
vendored
@@ -26,6 +26,7 @@ pub mod relocation;
|
||||
pub mod section_table;
|
||||
pub mod subsystem;
|
||||
pub mod symbol;
|
||||
pub mod tls;
|
||||
pub mod utils;
|
||||
|
||||
use crate::container;
|
||||
@@ -71,6 +72,8 @@ pub struct PE<'a> {
|
||||
pub libraries: Vec<&'a str>,
|
||||
/// Debug information, if any, contained in the PE header
|
||||
pub debug_data: Option<debug::DebugData<'a>>,
|
||||
/// TLS information, if any, contained in the PE header
|
||||
pub tls_data: Option<tls::TlsData<'a>>,
|
||||
/// Exception handling and stack unwind information, if any, contained in the PE header
|
||||
pub exception_data: Option<exception::ExceptionData<'a>>,
|
||||
/// Certificates present, if any, described by the Certificate Table
|
||||
@@ -106,6 +109,7 @@ impl<'a> PE<'a> {
|
||||
let mut import_data = None;
|
||||
let mut libraries = vec![];
|
||||
let mut debug_data = None;
|
||||
let mut tls_data = None;
|
||||
let mut exception_data = None;
|
||||
let mut certificates = Default::default();
|
||||
let mut is_64 = false;
|
||||
@@ -216,6 +220,29 @@ impl<'a> PE<'a> {
|
||||
)?);
|
||||
}
|
||||
|
||||
if let Some(tls_table) = optional_header.data_directories.get_tls_table() {
|
||||
tls_data = if is_64 {
|
||||
tls::TlsData::parse_with_opts::<u64>(
|
||||
bytes,
|
||||
image_base,
|
||||
tls_table,
|
||||
§ions,
|
||||
file_alignment,
|
||||
opts,
|
||||
)?
|
||||
} else {
|
||||
tls::TlsData::parse_with_opts::<u32>(
|
||||
bytes,
|
||||
image_base,
|
||||
&tls_table,
|
||||
§ions,
|
||||
file_alignment,
|
||||
opts,
|
||||
)?
|
||||
};
|
||||
debug!("tls data: {:#?}", tls_data);
|
||||
}
|
||||
|
||||
if header.coff_header.machine == header::COFF_MACHINE_X86_64 {
|
||||
// currently only x86_64 is supported
|
||||
debug!("exception data: {:#?}", exception_data);
|
||||
@@ -275,6 +302,7 @@ impl<'a> PE<'a> {
|
||||
imports,
|
||||
libraries,
|
||||
debug_data,
|
||||
tls_data,
|
||||
exception_data,
|
||||
certificates,
|
||||
})
|
||||
@@ -467,6 +495,98 @@ impl<'a> ctx::TryIntoCtx<scroll::Endian> for PE<'a> {
|
||||
}
|
||||
}
|
||||
|
||||
/// An analyzed TE binary
|
||||
///
|
||||
/// A TE binary is a PE/PE32+ binary that has had it's header stripped and
|
||||
/// re-formatted to the TE specification. This presents a challenge for
|
||||
/// parsing, as all relative addresses (RVAs) are not updated to take this into
|
||||
/// account, and are thus incorrect. The parsing of a TE must take this into
|
||||
/// account by using the [header::TeHeader::stripped_size`] field of the TE
|
||||
/// header to adjust the RVAs during parsing.
|
||||
#[cfg(feature = "te")]
|
||||
#[derive(Debug)]
|
||||
pub struct TE<'a> {
|
||||
/// The TE header
|
||||
pub header: header::TeHeader,
|
||||
/// A list of the sections in this TE binary
|
||||
pub sections: Vec<section_table::SectionTable>,
|
||||
/// Debug information, contained in the PE header
|
||||
pub debug_data: debug::DebugData<'a>,
|
||||
/// The offset to apply to addresses not parsed by the TE parser
|
||||
/// itself: [header::TeHeader::stripped_size] - size_of::<[header::TeHeader]>()
|
||||
pub rva_offset: usize,
|
||||
}
|
||||
|
||||
#[cfg(feature = "te")]
|
||||
impl<'a> TE<'a> {
|
||||
/// Reads a TE binary from the underlying `bytes`
|
||||
pub fn parse(bytes: &'a [u8]) -> error::Result<Self> {
|
||||
let opts = &options::ParseOptions {
|
||||
resolve_rva: false,
|
||||
parse_attribute_certificates: false,
|
||||
};
|
||||
|
||||
let mut offset = 0;
|
||||
|
||||
// Parse the TE header and adjust the offsets
|
||||
let header = header::TeHeader::parse(bytes, &mut offset)?;
|
||||
let rva_offset = header.stripped_size as usize - core::mem::size_of::<header::TeHeader>();
|
||||
|
||||
// Parse the sections and adjust the offsets
|
||||
let sections = header.sections(bytes, &mut offset)?;
|
||||
|
||||
// Parse the debug data. Must adjust offsets before parsing the image_debug_directory
|
||||
let mut debug_data = debug::DebugData::default();
|
||||
debug_data.image_debug_directory = debug::ImageDebugDirectory::parse_with_opts(
|
||||
bytes,
|
||||
header.debug_dir,
|
||||
§ions,
|
||||
0,
|
||||
opts,
|
||||
)?;
|
||||
TE::fixup_debug_data(&mut debug_data, rva_offset as u32);
|
||||
debug_data.codeview_pdb70_debug_info = debug::CodeviewPDB70DebugInfo::parse_with_opts(
|
||||
bytes,
|
||||
&debug_data.image_debug_directory,
|
||||
opts,
|
||||
)?;
|
||||
|
||||
Ok(TE {
|
||||
header,
|
||||
sections,
|
||||
debug_data,
|
||||
rva_offset,
|
||||
})
|
||||
}
|
||||
|
||||
/// Adjust all addresses in the TE binary debug data.
|
||||
fn fixup_debug_data(dd: &mut debug::DebugData, rva_offset: u32) {
|
||||
debug!(
|
||||
"ImageDebugDirectory address of raw data fixed up from: 0x{:X} to 0x{:X}",
|
||||
dd.image_debug_directory.address_of_raw_data,
|
||||
dd.image_debug_directory
|
||||
.address_of_raw_data
|
||||
.wrapping_sub(rva_offset),
|
||||
);
|
||||
dd.image_debug_directory.address_of_raw_data = dd
|
||||
.image_debug_directory
|
||||
.address_of_raw_data
|
||||
.wrapping_sub(rva_offset);
|
||||
|
||||
debug!(
|
||||
"ImageDebugDirectory pointer to raw data fixed up from: 0x{:X} to 0x{:X}",
|
||||
dd.image_debug_directory.pointer_to_raw_data,
|
||||
dd.image_debug_directory
|
||||
.pointer_to_raw_data
|
||||
.wrapping_sub(rva_offset),
|
||||
);
|
||||
dd.image_debug_directory.pointer_to_raw_data = dd
|
||||
.image_debug_directory
|
||||
.pointer_to_raw_data
|
||||
.wrapping_sub(rva_offset);
|
||||
}
|
||||
}
|
||||
|
||||
/// An analyzed COFF object
|
||||
#[derive(Debug)]
|
||||
pub struct Coff<'a> {
|
||||
|
||||
247
third_party/rust/goblin/src/pe/tls.rs
vendored
Normal file
247
third_party/rust/goblin/src/pe/tls.rs
vendored
Normal file
@@ -0,0 +1,247 @@
|
||||
use crate::error;
|
||||
use alloc::vec::Vec;
|
||||
use scroll::{Pread, Pwrite, SizeWith};
|
||||
|
||||
use crate::pe::data_directories;
|
||||
use crate::pe::options;
|
||||
use crate::pe::section_table;
|
||||
use crate::pe::utils;
|
||||
|
||||
/// Represents the TLS directory `IMAGE_TLS_DIRECTORY64`.
|
||||
#[repr(C)]
|
||||
#[derive(Debug, PartialEq, Copy, Clone, Default, Pread, Pwrite, SizeWith)]
|
||||
pub struct ImageTlsDirectory {
|
||||
/// The starting address of the TLS raw data.
|
||||
// NOTE: `u32` for 32-bit binaries, `u64` for 64-bit binaries.
|
||||
pub start_address_of_raw_data: u64,
|
||||
/// The ending address of the TLS raw data.
|
||||
// NOTE: `u32` for 32-bit binaries, `u64` for 64-bit binaries.
|
||||
pub end_address_of_raw_data: u64,
|
||||
/// The address of the TLS index.
|
||||
// NOTE: `u32` for 32-bit binaries, `u64` for 64-bit binaries.
|
||||
pub address_of_index: u64,
|
||||
/// The address of the TLS callback functions.
|
||||
///
|
||||
/// Terminated by a null pointer.
|
||||
// NOTE: `u32` for 32-bit binaries, `u64` for 64-bit binaries.
|
||||
pub address_of_callbacks: u64,
|
||||
/// The size of the zero fill.
|
||||
pub size_of_zero_fill: u32,
|
||||
/// The characteristics of the TLS.
|
||||
pub characteristics: u32,
|
||||
}
|
||||
|
||||
/// TLS information.
|
||||
#[derive(Debug, Clone, PartialEq, Default)]
|
||||
pub struct TlsData<'a> {
|
||||
/// TLS directory.
|
||||
pub image_tls_directory: ImageTlsDirectory,
|
||||
/// Raw data of the TLS.
|
||||
pub raw_data: Option<&'a [u8]>,
|
||||
/// TLS index.
|
||||
pub slot: Option<u32>,
|
||||
/// TLS callbacks.
|
||||
pub callbacks: Vec<u64>,
|
||||
}
|
||||
|
||||
impl ImageTlsDirectory {
|
||||
pub fn parse<T: Sized>(
|
||||
bytes: &[u8],
|
||||
dd: data_directories::DataDirectory,
|
||||
sections: &[section_table::SectionTable],
|
||||
file_alignment: u32,
|
||||
) -> error::Result<Self> {
|
||||
Self::parse_with_opts::<T>(
|
||||
bytes,
|
||||
dd,
|
||||
sections,
|
||||
file_alignment,
|
||||
&options::ParseOptions::default(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn parse_with_opts<T: Sized>(
|
||||
bytes: &[u8],
|
||||
dd: data_directories::DataDirectory,
|
||||
sections: &[section_table::SectionTable],
|
||||
file_alignment: u32,
|
||||
opts: &options::ParseOptions,
|
||||
) -> error::Result<Self> {
|
||||
let rva = dd.virtual_address as usize;
|
||||
let mut offset =
|
||||
utils::find_offset(rva, sections, file_alignment, opts).ok_or_else(|| {
|
||||
error::Error::Malformed(format!(
|
||||
"Cannot map ImageTlsDirectory rva {:#x} into offset",
|
||||
rva
|
||||
))
|
||||
})?;
|
||||
|
||||
let is_64 = core::mem::size_of::<T>() == 8;
|
||||
|
||||
let start_address_of_raw_data = if is_64 {
|
||||
bytes.gread_with::<u64>(&mut offset, scroll::LE)?
|
||||
} else {
|
||||
bytes.gread_with::<u32>(&mut offset, scroll::LE)? as u64
|
||||
};
|
||||
let end_address_of_raw_data = if is_64 {
|
||||
bytes.gread_with::<u64>(&mut offset, scroll::LE)?
|
||||
} else {
|
||||
bytes.gread_with::<u32>(&mut offset, scroll::LE)? as u64
|
||||
};
|
||||
let address_of_index = if is_64 {
|
||||
bytes.gread_with::<u64>(&mut offset, scroll::LE)?
|
||||
} else {
|
||||
bytes.gread_with::<u32>(&mut offset, scroll::LE)? as u64
|
||||
};
|
||||
let address_of_callbacks = if is_64 {
|
||||
bytes.gread_with::<u64>(&mut offset, scroll::LE)?
|
||||
} else {
|
||||
bytes.gread_with::<u32>(&mut offset, scroll::LE)? as u64
|
||||
};
|
||||
let size_of_zero_fill = bytes.gread_with::<u32>(&mut offset, scroll::LE)?;
|
||||
let characteristics = bytes.gread_with::<u32>(&mut offset, scroll::LE)?;
|
||||
|
||||
let itd = Self {
|
||||
start_address_of_raw_data,
|
||||
end_address_of_raw_data,
|
||||
address_of_index,
|
||||
address_of_callbacks,
|
||||
size_of_zero_fill,
|
||||
characteristics,
|
||||
};
|
||||
|
||||
Ok(itd)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> TlsData<'a> {
|
||||
pub fn parse<T: Sized>(
|
||||
bytes: &'a [u8],
|
||||
image_base: usize,
|
||||
dd: &data_directories::DataDirectory,
|
||||
sections: &[section_table::SectionTable],
|
||||
file_alignment: u32,
|
||||
) -> error::Result<Option<Self>> {
|
||||
Self::parse_with_opts::<T>(
|
||||
bytes,
|
||||
image_base,
|
||||
dd,
|
||||
sections,
|
||||
file_alignment,
|
||||
&options::ParseOptions::default(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn parse_with_opts<T: Sized>(
|
||||
bytes: &'a [u8],
|
||||
image_base: usize,
|
||||
dd: &data_directories::DataDirectory,
|
||||
sections: &[section_table::SectionTable],
|
||||
file_alignment: u32,
|
||||
opts: &options::ParseOptions,
|
||||
) -> error::Result<Option<Self>> {
|
||||
let mut raw_data = None;
|
||||
let mut slot = None;
|
||||
let mut callbacks = Vec::new();
|
||||
|
||||
let is_64 = core::mem::size_of::<T>() == 8;
|
||||
|
||||
let itd =
|
||||
ImageTlsDirectory::parse_with_opts::<T>(bytes, *dd, sections, file_alignment, opts)?;
|
||||
|
||||
// Parse the raw data if any
|
||||
if itd.end_address_of_raw_data != 0 && itd.start_address_of_raw_data != 0 {
|
||||
if itd.start_address_of_raw_data > itd.end_address_of_raw_data {
|
||||
return Err(error::Error::Malformed(format!(
|
||||
"tls start_address_of_raw_data ({:#x}) is greater than end_address_of_raw_data ({:#x})",
|
||||
itd.start_address_of_raw_data,
|
||||
itd.end_address_of_raw_data
|
||||
)));
|
||||
}
|
||||
|
||||
if (itd.start_address_of_raw_data as usize) < image_base {
|
||||
return Err(error::Error::Malformed(format!(
|
||||
"tls start_address_of_raw_data ({:#x}) is less than image base ({:#x})",
|
||||
itd.start_address_of_raw_data, image_base
|
||||
)));
|
||||
}
|
||||
|
||||
// VA to RVA
|
||||
let rva = itd.start_address_of_raw_data as usize - image_base;
|
||||
let size = itd.end_address_of_raw_data - itd.start_address_of_raw_data;
|
||||
let offset =
|
||||
utils::find_offset(rva, sections, file_alignment, opts).ok_or_else(|| {
|
||||
error::Error::Malformed(format!(
|
||||
"cannot map tls start_address_of_raw_data rva ({:#x}) into offset",
|
||||
rva
|
||||
))
|
||||
})?;
|
||||
raw_data = Some(&bytes[offset..offset + size as usize]);
|
||||
}
|
||||
|
||||
// Parse the index if any
|
||||
if itd.address_of_index != 0 {
|
||||
if (itd.address_of_index as usize) < image_base {
|
||||
return Err(error::Error::Malformed(format!(
|
||||
"tls address_of_index ({:#x}) is less than image base ({:#x})",
|
||||
itd.address_of_index, image_base
|
||||
)));
|
||||
}
|
||||
|
||||
// VA to RVA
|
||||
let rva = itd.address_of_index as usize - image_base;
|
||||
let offset = utils::find_offset(rva, sections, file_alignment, opts);
|
||||
slot = offset.and_then(|x| bytes.pread_with::<u32>(x, scroll::LE).ok());
|
||||
}
|
||||
|
||||
// Parse the callbacks if any
|
||||
if itd.address_of_callbacks != 0 {
|
||||
if (itd.address_of_callbacks as usize) < image_base {
|
||||
return Err(error::Error::Malformed(format!(
|
||||
"tls address_of_callbacks ({:#x}) is less than image base ({:#x})",
|
||||
itd.address_of_callbacks, image_base
|
||||
)));
|
||||
}
|
||||
|
||||
// VA to RVA
|
||||
let rva = itd.address_of_callbacks as usize - image_base;
|
||||
let offset =
|
||||
utils::find_offset(rva, sections, file_alignment, opts).ok_or_else(|| {
|
||||
error::Error::Malformed(format!(
|
||||
"cannot map tls address_of_callbacks rva ({:#x}) into offset",
|
||||
rva
|
||||
))
|
||||
})?;
|
||||
let mut i = 0;
|
||||
// Read the callbacks until we find a null terminator
|
||||
loop {
|
||||
let callback: u64 = if is_64 {
|
||||
bytes.pread_with::<u64>(offset + i * 8, scroll::LE)?
|
||||
} else {
|
||||
bytes.pread_with::<u32>(offset + i * 4, scroll::LE)? as u64
|
||||
};
|
||||
if callback == 0 {
|
||||
break;
|
||||
}
|
||||
// Each callback is an VA so convert it to RVA
|
||||
let callback_rva = callback as usize - image_base;
|
||||
// Check if the callback is in the image
|
||||
if utils::find_offset(callback_rva, sections, file_alignment, opts).is_none() {
|
||||
return Err(error::Error::Malformed(format!(
|
||||
"cannot map tls callback ({:#x})",
|
||||
callback
|
||||
)));
|
||||
}
|
||||
callbacks.push(callback);
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Some(TlsData {
|
||||
image_tls_directory: itd,
|
||||
raw_data,
|
||||
slot,
|
||||
callbacks,
|
||||
}))
|
||||
}
|
||||
}
|
||||
2
third_party/rust/goblin/src/pe/utils.rs
vendored
2
third_party/rust/goblin/src/pe/utils.rs
vendored
@@ -64,6 +64,8 @@ fn section_read_size(section: §ion_table::SectionTable, file_alignment: u32)
|
||||
|
||||
if virtual_size == 0 {
|
||||
read_size
|
||||
} else if read_size == 0 {
|
||||
virtual_size
|
||||
} else {
|
||||
cmp::min(read_size, round_size(virtual_size))
|
||||
}
|
||||
|
||||
24
third_party/rust/goblin/tests/bins/te/README.md
vendored
Normal file
24
third_party/rust/goblin/tests/bins/te/README.md
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
# TE binaries
|
||||
|
||||
Binaries located in this directory are precompiled PE32/PE32+ binaries using a
|
||||
terse executable (TE) header as defined in the Platform Initialization (PI)
|
||||
specification: [TE](https://uefi.org/specs/PI/1.8/V1_TE_Image.html#te-header).
|
||||
These binaries were compiled using the
|
||||
[EDK2](https://github.com/tianocore/edk2) build system.
|
||||
|
||||
## test_image.te
|
||||
|
||||
This binary is a simple Terse executable binary
|
||||
|
||||
## test_image_loaded.bin
|
||||
|
||||
This binary is the same as `test_image.te`, but it has been loaded by a loader,
|
||||
meaning the sections have been placed in the expected address. Please note that
|
||||
this particular binary has not been relocated, so no relocations have been
|
||||
applied
|
||||
|
||||
## test_image_relocated.bin
|
||||
|
||||
This binary is the same as `test_image.te`, but it has been loaded by a loader,
|
||||
meaning the sections have been placed in the expected address, and any any
|
||||
relocations have been applied.
|
||||
@@ -1 +1 @@
|
||||
{"files":{"Cargo.toml":"3fe59b28a1e995925fd0d0a750787be30686e0ee5f93c0ff23ec5cdf9ef1ab87","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"4c2a1448aab9177fd5f033faaf704af7bb222bf0804079fd3cff90fa1df4b812","src/errors/linux.rs":"df743ac9478e39f8a577f4f10f2d1317babad7b7c0d26cdbba2ea6b5426f4126","src/errors/macos.rs":"4516aaeb7abf6209f5cd94e86a1e55a9675ef77262f52e3b2d5596fd4b858458","src/errors/mod.rs":"f224af66124fd31a040c8da11bbab7b7795b48e4edea76e01c1f4dee537ea38a","src/errors/windows.rs":"0567af7bfac3ae2a8dff418e10873d8a5bf15a8b8ac6892c5ffdab08ec3ac901","src/format.rs":"17daa7037ba9b4e8aa30a880975e883f618b3ff3ebb0b88a81313a895700fc6c","src/lib.rs":"0900c00594b3c386b86127055889006f0d7d0004b08455fadb0e60d55a469cab","src/traits.rs":"93127ad69a849325ed66a0626e0bdae05868488f81c539d35c71a7bfbb9e51ac","src/utils.rs":"6ab64a2fc39187903de0813d27db370cbeb57ba4984c6a993034829176bed4d7"},"package":"0cd8a9fb054833d2f402e82e256aeef544e595e45fe8fca2de6d03ed605f6647"}
|
||||
{"files":{"Cargo.toml":"9ae1d211e4d197c8a257a67d4f9436738aeccab960401283e6b5cf1f2e9c19d4","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"4c2a1448aab9177fd5f033faaf704af7bb222bf0804079fd3cff90fa1df4b812","src/errors/linux.rs":"df743ac9478e39f8a577f4f10f2d1317babad7b7c0d26cdbba2ea6b5426f4126","src/errors/macos.rs":"4516aaeb7abf6209f5cd94e86a1e55a9675ef77262f52e3b2d5596fd4b858458","src/errors/mod.rs":"f224af66124fd31a040c8da11bbab7b7795b48e4edea76e01c1f4dee537ea38a","src/errors/windows.rs":"0567af7bfac3ae2a8dff418e10873d8a5bf15a8b8ac6892c5ffdab08ec3ac901","src/format.rs":"db607fd726e74da5aa4be8e8458409e7238c8194db201b3a0cb09ab822b738d2","src/lib.rs":"0900c00594b3c386b86127055889006f0d7d0004b08455fadb0e60d55a469cab","src/traits.rs":"e78a0dbf496b73b4d904874475ebc621ef5912db47b04bb37d3b2100c82629d3","src/utils.rs":"6ab64a2fc39187903de0813d27db370cbeb57ba4984c6a993034829176bed4d7"},"package":"5273687f49325b3977f7d372a1bbe2e528694d18128de8dcac78d134448e83b4"}
|
||||
3
third_party/rust/minidump-common/Cargo.toml
vendored
3
third_party/rust/minidump-common/Cargo.toml
vendored
@@ -12,9 +12,10 @@
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "minidump-common"
|
||||
version = "0.22.1"
|
||||
version = "0.24.0"
|
||||
authors = ["Ted Mielczarek <ted@mielczarek.org>"]
|
||||
build = false
|
||||
autolib = false
|
||||
autobins = false
|
||||
autoexamples = false
|
||||
autotests = false
|
||||
|
||||
11
third_party/rust/minidump-common/src/format.rs
vendored
11
third_party/rust/minidump-common/src/format.rs
vendored
@@ -323,6 +323,9 @@ pub enum MINIDUMP_STREAM_TYPE {
|
||||
|
||||
/// The contents of /proc/self/limits from a Linux system
|
||||
MozLinuxLimits = 0x4d7a0003,
|
||||
|
||||
/// Soft errors reported during minidump generation
|
||||
MozSoftErrors = 0x4d7a0004,
|
||||
}
|
||||
|
||||
impl From<MINIDUMP_STREAM_TYPE> for u32 {
|
||||
@@ -470,7 +473,7 @@ pub struct CV_INFO_PDB20 {
|
||||
pub pdb_file_name: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> scroll::ctx::TryFromCtx<'a, Endian> for CV_INFO_PDB20 {
|
||||
impl scroll::ctx::TryFromCtx<'_, Endian> for CV_INFO_PDB20 {
|
||||
type Error = scroll::Error;
|
||||
|
||||
fn try_from_ctx(src: &[u8], endian: Endian) -> Result<(Self, usize), Self::Error> {
|
||||
@@ -506,7 +509,7 @@ pub struct CV_INFO_PDB70 {
|
||||
pub pdb_file_name: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> scroll::ctx::TryFromCtx<'a, Endian> for CV_INFO_PDB70 {
|
||||
impl scroll::ctx::TryFromCtx<'_, Endian> for CV_INFO_PDB70 {
|
||||
type Error = scroll::Error;
|
||||
|
||||
fn try_from_ctx(src: &[u8], endian: Endian) -> Result<(Self, usize), Self::Error> {
|
||||
@@ -1742,7 +1745,7 @@ pub struct XstateFeatureIter<'a> {
|
||||
idx: usize,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for XstateFeatureIter<'a> {
|
||||
impl Iterator for XstateFeatureIter<'_> {
|
||||
type Item = (usize, XSTATE_FEATURE);
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
while self.idx < self.info.features.len() {
|
||||
@@ -2055,7 +2058,7 @@ pub struct MINIDUMP_UTF8_STRING {
|
||||
pub buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<'a> scroll::ctx::TryFromCtx<'a, Endian> for MINIDUMP_UTF8_STRING {
|
||||
impl scroll::ctx::TryFromCtx<'_, Endian> for MINIDUMP_UTF8_STRING {
|
||||
type Error = scroll::Error;
|
||||
|
||||
fn try_from_ctx(src: &[u8], endian: Endian) -> Result<(Self, usize), Self::Error> {
|
||||
|
||||
@@ -39,7 +39,7 @@ pub trait Module {
|
||||
|
||||
/// Implement Module for 2-tuples of (&str, DebugId) for convenience.
|
||||
/// `breakpad-symbols`' `Symbolizer::get_symbol_at_address` uses this.
|
||||
impl<'a> Module for (&'a str, DebugId) {
|
||||
impl Module for (&str, DebugId) {
|
||||
fn base_address(&self) -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
{"files":{"Cargo.toml":"ec7d84e19cbefba8024be68cc043fc067d1efca8739db4e449b04677b113d500","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"0202d4bced7cbc5fb801a916d11c752019111a0043fb6476ef945414d322588e","src/amd64.rs":"c05bfdb7479ba5e2d3ba942b75e66e38784b2fe7deef5045344db43d438bb9c8","src/amd64_unittest.rs":"9a0aea53f153201bd8ffcfec215966be671098de508ce1429d4536402893c0d5","src/arm.rs":"a61b28eabf8c72ea7d16911426c6bbab4e08766ca43e75099d0cd7817e02a977","src/arm64.rs":"c8233f255b64d116ea804a0fe15a4ee0211124f9434d0125a957d267ab6ea3c2","src/arm64_old.rs":"78843e9a46e3ce5f461a19d63445fd1b2aac7f4e6d91aac0f3b9e352b958606c","src/arm64_unittest.rs":"d48d7577422aa53bb041bc243e866d369bdc57fd1219ebe7f7032b9bfedfc1dc","src/arm_unittest.rs":"b2e64e57ed638c228d41ecf35ea57f0a6f9ca007a5288bc63a779fa759548c50","src/lib.rs":"77e4267e6c91195e848f0cddca1ad1b3e564c950d21781a835de3540a6bee7f5","src/mips.rs":"897aa10ade29adc4253bb2049b19a7e1690bf548ce015ba63eba1f8bcc04361c","src/symbols/debuginfo.rs":"c857a28410ac09b5ced5be21ff593ab5348478395e24addbbbfa873cafc60283","src/symbols/mod.rs":"a598e48bf2ef657e7c833f7f2d3950a10c68483e71b98f136c7975baba9ad238","src/system_info.rs":"228ac55b18a647e5302b5cb7c10e65c9d046decb5d9207e4ded098405bf1739c","src/x86.rs":"fab7ccec6285a9970da7f13709b8e7e53b1198ca275235e4bd415a620d87197e","src/x86_unittest.rs":"73212f5b1c2ce4605e540230c5574de817e42e0447ffe304b8822b69066aa7c8"},"package":"efde3c09258c297c0f6761f04d97771ef82a59a6734e7ba0e6e2ef961fb3cbb3"}
|
||||
{"files":{"Cargo.toml":"343d32b03896ac26322b052dc1b4b068af995593b696ad12b0b296f1ee64b3f7","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"0202d4bced7cbc5fb801a916d11c752019111a0043fb6476ef945414d322588e","src/amd64.rs":"c05bfdb7479ba5e2d3ba942b75e66e38784b2fe7deef5045344db43d438bb9c8","src/amd64_unittest.rs":"9a0aea53f153201bd8ffcfec215966be671098de508ce1429d4536402893c0d5","src/arm.rs":"a61b28eabf8c72ea7d16911426c6bbab4e08766ca43e75099d0cd7817e02a977","src/arm64.rs":"c8233f255b64d116ea804a0fe15a4ee0211124f9434d0125a957d267ab6ea3c2","src/arm64_old.rs":"78843e9a46e3ce5f461a19d63445fd1b2aac7f4e6d91aac0f3b9e352b958606c","src/arm64_unittest.rs":"d48d7577422aa53bb041bc243e866d369bdc57fd1219ebe7f7032b9bfedfc1dc","src/arm_unittest.rs":"b2e64e57ed638c228d41ecf35ea57f0a6f9ca007a5288bc63a779fa759548c50","src/lib.rs":"77e4267e6c91195e848f0cddca1ad1b3e564c950d21781a835de3540a6bee7f5","src/mips.rs":"897aa10ade29adc4253bb2049b19a7e1690bf548ce015ba63eba1f8bcc04361c","src/symbols/debuginfo.rs":"4301681af1a4cb1e52ed100654ec298cf74fe16e20f14256e45ba460424f580b","src/symbols/mod.rs":"a598e48bf2ef657e7c833f7f2d3950a10c68483e71b98f136c7975baba9ad238","src/system_info.rs":"228ac55b18a647e5302b5cb7c10e65c9d046decb5d9207e4ded098405bf1739c","src/x86.rs":"fab7ccec6285a9970da7f13709b8e7e53b1198ca275235e4bd415a620d87197e","src/x86_unittest.rs":"73212f5b1c2ce4605e540230c5574de817e42e0447ffe304b8822b69066aa7c8"},"package":"c30454f5703c77433b4059bf5e196266b800b14223c55793ee636e49c8f9160e"}
|
||||
11
third_party/rust/minidump-unwind/Cargo.toml
vendored
11
third_party/rust/minidump-unwind/Cargo.toml
vendored
@@ -12,9 +12,10 @@
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "minidump-unwind"
|
||||
version = "0.22.1"
|
||||
version = "0.24.0"
|
||||
authors = ["Alex Franchuk <afranchuk@mozilla.com>"]
|
||||
build = false
|
||||
autolib = false
|
||||
autobins = false
|
||||
autoexamples = false
|
||||
autotests = false
|
||||
@@ -37,14 +38,14 @@ path = "src/lib.rs"
|
||||
version = "0.1.52"
|
||||
|
||||
[dependencies.breakpad-symbols]
|
||||
version = "0.22.1"
|
||||
version = "0.24.0"
|
||||
|
||||
[dependencies.cachemap2]
|
||||
version = "0.3.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.framehop]
|
||||
version = "0.12"
|
||||
version = "0.13"
|
||||
optional = true
|
||||
|
||||
[dependencies.futures-util]
|
||||
@@ -56,10 +57,10 @@ version = "0.9"
|
||||
optional = true
|
||||
|
||||
[dependencies.minidump]
|
||||
version = "0.22.1"
|
||||
version = "0.24.0"
|
||||
|
||||
[dependencies.minidump-common]
|
||||
version = "0.22.1"
|
||||
version = "0.24.0"
|
||||
|
||||
[dependencies.object]
|
||||
version = "0.36"
|
||||
|
||||
@@ -301,7 +301,7 @@ mod object_section_info {
|
||||
#[repr(transparent)]
|
||||
pub struct ObjectSectionInfo<'a, O>(pub &'a O);
|
||||
|
||||
impl<'a, O> std::ops::Deref for ObjectSectionInfo<'a, O> {
|
||||
impl<O> std::ops::Deref for ObjectSectionInfo<'_, O> {
|
||||
type Target = O;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
|
||||
File diff suppressed because one or more lines are too long
12
third_party/rust/minidump-writer/CHANGELOG.md
vendored
12
third_party/rust/minidump-writer/CHANGELOG.md
vendored
@@ -8,6 +8,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
|
||||
<!-- next-header -->
|
||||
## [Unreleased] - ReleaseDate
|
||||
## [0.10.2] - 2025-02-03
|
||||
### Added
|
||||
- [PR#143](https://github.com/rust-minidump/minidump-writer/pull/143)
|
||||
- turn many errors that are currently treated as critical (and thus prevent minidump generation) into non-critical "soft" errors
|
||||
- collect non-critical errors and serialize them into a new JSON stream in the minidump
|
||||
|
||||
### Changed
|
||||
- [PR#145](https://github.com/rust-minidump/minidump-writer/pull/145) updated dependencies.
|
||||
|
||||
## [0.10.1] - 2024-09-20
|
||||
### Fixed
|
||||
- [PR#129](https://github.com/rust-minidump/minidump-writer/pull/129) added checking of additions to ensure invalid memory offsets are gracefully handled.
|
||||
@@ -157,7 +166,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
||||
- Initial release, including basic support for `x86_64-unknown-linux-gnu/musl` and `x86_64-pc-windows-msvc`
|
||||
|
||||
<!-- next-url -->
|
||||
[Unreleased]: https://github.com/rust-minidump/minidump-writer/compare/0.10.1...HEAD
|
||||
[Unreleased]: https://github.com/rust-minidump/minidump-writer/compare/0.10.2...HEAD
|
||||
[0.10.2]: https://github.com/rust-minidump/minidump-writer/compare/0.10.1...0.10.2
|
||||
[0.10.1]: https://github.com/rust-minidump/minidump-writer/compare/0.10.0...0.10.1
|
||||
[0.10.0]: https://github.com/rust-minidump/minidump-writer/compare/0.9.0...0.10.0
|
||||
[0.9.0]: https://github.com/rust-minidump/minidump-writer/compare/0.8.9...0.9.0
|
||||
|
||||
1180
third_party/rust/minidump-writer/Cargo.lock
generated
vendored
1180
third_party/rust/minidump-writer/Cargo.lock
generated
vendored
File diff suppressed because it is too large
Load Diff
50
third_party/rust/minidump-writer/Cargo.toml
vendored
50
third_party/rust/minidump-writer/Cargo.toml
vendored
@@ -12,9 +12,10 @@
|
||||
[package]
|
||||
edition = "2021"
|
||||
name = "minidump-writer"
|
||||
version = "0.10.1"
|
||||
version = "0.10.2"
|
||||
authors = ["Martin Sirringhaus"]
|
||||
build = "build.rs"
|
||||
autolib = false
|
||||
autobins = false
|
||||
autoexamples = false
|
||||
autotests = false
|
||||
@@ -41,6 +42,10 @@ path = "examples/synthetic.rs"
|
||||
name = "linux_minidump_writer"
|
||||
path = "tests/linux_minidump_writer.rs"
|
||||
|
||||
[[test]]
|
||||
name = "linux_minidump_writer_soft_error"
|
||||
path = "tests/linux_minidump_writer_soft_error.rs"
|
||||
|
||||
[[test]]
|
||||
name = "mac_minidump_writer"
|
||||
path = "tests/mac_minidump_writer.rs"
|
||||
@@ -58,7 +63,7 @@ name = "windows_minidump_writer"
|
||||
path = "tests/windows_minidump_writer.rs"
|
||||
|
||||
[dependencies.bitflags]
|
||||
version = "2.4"
|
||||
version = "2.8"
|
||||
|
||||
[dependencies.byteorder]
|
||||
version = "1.4"
|
||||
@@ -69,6 +74,13 @@ version = "1.0"
|
||||
[dependencies.crash-context]
|
||||
version = "0.6"
|
||||
|
||||
[dependencies.error-graph]
|
||||
version = "0.1.1"
|
||||
features = ["serde"]
|
||||
|
||||
[dependencies.failspot]
|
||||
version = "0.2.0"
|
||||
|
||||
[dependencies.log]
|
||||
version = "0.4"
|
||||
|
||||
@@ -76,20 +88,31 @@ version = "0.4"
|
||||
version = "0.9"
|
||||
|
||||
[dependencies.minidump-common]
|
||||
version = "0.22"
|
||||
version = "0.24"
|
||||
|
||||
[dependencies.scroll]
|
||||
version = "0.12"
|
||||
|
||||
[dependencies.serde]
|
||||
version = "1.0.208"
|
||||
features = ["derive"]
|
||||
|
||||
[dependencies.serde_json]
|
||||
version = "1.0.116"
|
||||
|
||||
[dependencies.tempfile]
|
||||
version = "3.8"
|
||||
version = "3.16"
|
||||
|
||||
[dependencies.thiserror]
|
||||
version = "1.0"
|
||||
version = "2.0"
|
||||
|
||||
[dev-dependencies.current_platform]
|
||||
version = "0.2"
|
||||
|
||||
[dev-dependencies.failspot]
|
||||
version = "0.2.0"
|
||||
features = ["enabled"]
|
||||
|
||||
[dev-dependencies.futures]
|
||||
version = "0.3"
|
||||
features = ["executor"]
|
||||
@@ -98,7 +121,7 @@ features = ["executor"]
|
||||
version = "0.9"
|
||||
|
||||
[dev-dependencies.minidump]
|
||||
version = "0.22"
|
||||
version = "0.24"
|
||||
|
||||
[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies.nix]
|
||||
version = "0.29"
|
||||
@@ -113,7 +136,8 @@ features = [
|
||||
default-features = false
|
||||
|
||||
[target.'cfg(any(target_os = "linux", target_os = "android"))'.dependencies.procfs-core]
|
||||
version = "0.16"
|
||||
version = "0.17"
|
||||
features = ["serde1"]
|
||||
default-features = false
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dependencies.mach2]
|
||||
@@ -123,25 +147,21 @@ version = "0.4"
|
||||
version = "2.2"
|
||||
default-features = false
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dev-dependencies.minidump-processor]
|
||||
version = "0.22"
|
||||
default-features = false
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dev-dependencies.minidump-unwind]
|
||||
version = "0.22"
|
||||
version = "0.24"
|
||||
features = ["debuginfo"]
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dev-dependencies.similar-asserts]
|
||||
version = "1.5"
|
||||
version = "1.6"
|
||||
|
||||
[target.'cfg(target_os = "macos")'.dev-dependencies.uuid]
|
||||
version = "1.4"
|
||||
version = "1.12"
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies.bitflags]
|
||||
version = "2.4"
|
||||
|
||||
[target."cfg(unix)".dependencies.goblin]
|
||||
version = "0.8.2"
|
||||
version = "0.9.2"
|
||||
|
||||
[target."cfg(unix)".dependencies.libc]
|
||||
version = "0.2"
|
||||
|
||||
3
third_party/rust/minidump-writer/deny.toml
vendored
3
third_party/rust/minidump-writer/deny.toml
vendored
@@ -20,5 +20,4 @@ deny = [
|
||||
skip-tree = []
|
||||
|
||||
[licenses]
|
||||
allow = ["MIT", "Apache-2.0"]
|
||||
exceptions = [{ allow = ["Unicode-DFS-2016"], name = "unicode-ident" }]
|
||||
allow = ["MIT", "Apache-2.0", "Unicode-3.0"]
|
||||
|
||||
127
third_party/rust/minidump-writer/src/bin/test.rs
vendored
@@ -6,14 +6,17 @@ pub type Result<T> = std::result::Result<T, Error>;

#[cfg(any(target_os = "linux", target_os = "android"))]
mod linux {
use super::*;
use minidump_writer::{
use {
super::*,
error_graph::ErrorList,
minidump_writer::{
minidump_writer::STOP_TIMEOUT, module_reader, ptrace_dumper::PtraceDumper,
LINUX_GATE_LIBRARY_NAME,
};
use nix::{
},
nix::{
sys::mman::{mmap_anonymous, MapFlags, ProtFlags},
unistd::getppid,
},
};

macro_rules! test {
@@ -24,15 +27,40 @@ mod linux {
};
}

macro_rules! fail_on_soft_error(($n: ident, $e: expr) => {{
let mut $n = ErrorList::default();
let __result = $e;
if !$n.is_empty() {
return Err($n.into());
}
__result
}});

fn test_setup() -> Result<()> {
let ppid = getppid();
PtraceDumper::new(ppid.as_raw(), STOP_TIMEOUT, Default::default())?;
fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid.as_raw(),
STOP_TIMEOUT,
Default::default(),
&mut soft_errors,
)?
);
Ok(())
}

fn test_thread_list() -> Result<()> {
let ppid = getppid();
let dumper = PtraceDumper::new(ppid.as_raw(), STOP_TIMEOUT, Default::default())?;
let dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid.as_raw(),
STOP_TIMEOUT,
Default::default(),
&mut soft_errors,
)?
);
test!(!dumper.threads.is_empty(), "No threads");
test!(
dumper
@@ -59,8 +87,17 @@ mod linux {
use minidump_writer::mem_reader::MemReader;

let ppid = getppid().as_raw();
let mut dumper = PtraceDumper::new(ppid, STOP_TIMEOUT, Default::default())?;
dumper.suspend_threads()?;
let mut dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid,
STOP_TIMEOUT,
Default::default(),
&mut soft_errors
)?
);

fail_on_soft_error!(soft_errors, dumper.suspend_threads(&mut soft_errors));

// We support 3 different methods of reading memory from another
// process, ensure they all function and give the same results
@@ -113,13 +150,22 @@ mod linux {

test!(heap_res == expected_heap, "heap var not correct");

dumper.resume_threads()?;
fail_on_soft_error!(soft_errors, dumper.resume_threads(&mut soft_errors));

Ok(())
}

fn test_find_mappings(addr1: usize, addr2: usize) -> Result<()> {
let ppid = getppid();
let dumper = PtraceDumper::new(ppid.as_raw(), STOP_TIMEOUT, Default::default())?;
let dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid.as_raw(),
STOP_TIMEOUT,
Default::default(),
&mut soft_errors,
)?
);
dumper
.find_mapping(addr1)
.ok_or("No mapping for addr1 found")?;
@@ -136,8 +182,19 @@ mod linux {
let ppid = getppid().as_raw();
let exe_link = format!("/proc/{ppid}/exe");
let exe_name = std::fs::read_link(exe_link)?.into_os_string();
let mut dumper = PtraceDumper::new(ppid, STOP_TIMEOUT, Default::default())?;
dumper.suspend_threads()?;

let mut dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid,
STOP_TIMEOUT,
Default::default(),
&mut soft_errors
)?
);

fail_on_soft_error!(soft_errors, dumper.suspend_threads(&mut soft_errors));

let mut found_exe = None;
for (idx, mapping) in dumper.mappings.iter().enumerate() {
if mapping.name.as_ref().map(|x| x.into()).as_ref() == Some(&exe_name) {
@@ -147,7 +204,9 @@ mod linux {
}
let idx = found_exe.unwrap();
let module_reader::BuildId(id) = dumper.from_process_memory_for_index(idx)?;
dumper.resume_threads()?;

fail_on_soft_error!(soft_errors, dumper.resume_threads(&mut soft_errors));

assert!(!id.is_empty());
assert!(id.iter().any(|&x| x > 0));
Ok(())
@@ -155,13 +214,21 @@ mod linux {

fn test_merged_mappings(path: String, mapped_mem: usize, mem_size: usize) -> Result<()> {
// Now check that PtraceDumper interpreted the mappings properly.
let dumper = PtraceDumper::new(getppid().as_raw(), STOP_TIMEOUT, Default::default())?;
let dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
getppid().as_raw(),
STOP_TIMEOUT,
Default::default(),
&mut soft_errors,
)?
);
let mut mapping_count = 0;
for map in &dumper.mappings {
if map
.name
.as_ref()
.map_or(false, |name| name.to_string_lossy().starts_with(&path))
.is_some_and(|name| name.to_string_lossy().starts_with(&path))
{
mapping_count += 1;
// This mapping should encompass the entire original mapped
@@ -177,17 +244,29 @@ mod linux {

fn test_linux_gate_mapping_id() -> Result<()> {
let ppid = getppid().as_raw();
let mut dumper = PtraceDumper::new(ppid, STOP_TIMEOUT, Default::default())?;
let mut dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid,
STOP_TIMEOUT,
Default::default(),
&mut soft_errors
)?
);
let mut found_linux_gate = false;
for mapping in dumper.mappings.clone() {
if mapping.name == Some(LINUX_GATE_LIBRARY_NAME.into()) {
found_linux_gate = true;
dumper.suspend_threads()?;

fail_on_soft_error!(soft_errors, dumper.suspend_threads(&mut soft_errors));

let module_reader::BuildId(id) =
PtraceDumper::from_process_memory_for_mapping(&mapping, ppid)?;
test!(!id.is_empty(), "id-vec is empty");
test!(id.iter().any(|&x| x > 0), "all id elements are 0");
dumper.resume_threads()?;

fail_on_soft_error!(soft_errors, dumper.resume_threads(&mut soft_errors));

break;
}
}
@@ -197,7 +276,15 @@ mod linux {

fn test_mappings_include_linux_gate() -> Result<()> {
let ppid = getppid().as_raw();
let dumper = PtraceDumper::new(ppid, STOP_TIMEOUT, Default::default())?;
let dumper = fail_on_soft_error!(
soft_errors,
PtraceDumper::new_report_soft_errors(
ppid,
STOP_TIMEOUT,
Default::default(),
&mut soft_errors
)?
);
let linux_gate_loc = dumper.auxv.get_linux_gate_address().unwrap();
test!(linux_gate_loc != 0, "linux_gate_loc == 0");
let mut found_linux_gate = false;
@@ -205,7 +292,7 @@ mod linux {
if mapping.name == Some(LINUX_GATE_LIBRARY_NAME.into()) {
found_linux_gate = true;
test!(
linux_gate_loc == mapping.start_address.try_into()?,
usize::try_from(linux_gate_loc)? == mapping.start_address,
"linux_gate_loc != start_address"
);
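The `fail_on_soft_error!` macro above only exists in the test binary: production callers are allowed to continue past soft errors, but the tests want any accumulated soft error to fail the run. A minimal sketch of the same idea, using a hypothetical `SoftErrors` stand-in rather than the real `error_graph::ErrorList` API:

type SketchResult<T> = std::result::Result<T, String>;

#[derive(Default)]
struct SoftErrors(Vec<String>);

impl SoftErrors {
    fn push(&mut self, e: impl Into<String>) {
        self.0.push(e.into());
    }
    fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}

// The operation reports recoverable problems through `soft_errors` instead of failing.
fn risky_operation(soft_errors: &mut SoftErrors) -> SketchResult<u32> {
    soft_errors.push("auxv entry was unreadable");
    Ok(42)
}

fn main() -> SketchResult<()> {
    let mut soft_errors = SoftErrors::default();
    let value = risky_operation(&mut soft_errors)?;
    // Like `fail_on_soft_error!`, a test treats any accumulated soft error as a hard failure.
    if !soft_errors.is_empty() {
        return Err(format!("soft errors: {:?}", soft_errors.0));
    }
    println!("got {value}");
    Ok(())
}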
@@ -1,15 +1,22 @@
use crate::{
use {
crate::{
mem_writer::{Buffer, MemoryArrayWriter, MemoryWriterError},
minidump_format::MDRawDirectory,
serializers::*,
},
std::io::{Error, Seek, Write},
};
use std::io::{Error, Seek, Write};

pub type DumpBuf = Buffer;

#[derive(Debug, thiserror::Error)]
#[derive(Debug, thiserror::Error, serde::Serialize)]
pub enum FileWriterError {
#[error("IO error")]
IOError(#[from] Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
Error,
),
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
}
15
third_party/rust/minidump-writer/src/lib.rs
vendored
@@ -14,8 +14,19 @@ cfg_if::cfg_if! {
}
}

pub mod dir_section;
pub mod mem_writer;
pub mod minidump_cpu;
pub mod minidump_format;

pub mod dir_section;
pub mod mem_writer;
mod serializers;

failspot::failspot_name! {
pub enum FailSpotName {
StopProcess,
FillMissingAuxvInfo,
ThreadName,
SuspendThreads,
CpuInfoFileOpen,
}
}

@@ -15,6 +15,7 @@ pub mod minidump_writer;
pub mod module_reader;
pub mod ptrace_dumper;
pub(crate) mod sections;
mod serializers;
pub mod thread_info;

pub use maps_reader::LINUX_GATE_LIBRARY_NAME;
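The `failspot_name!` block above declares the set of named failure-injection points the crate's tests can flip on. Conceptually each name behaves like a test-only flag checked at the spot where the failure would be injected; a rough sketch of that idea using a plain atomic flag (not the actual `failspot` crate machinery):

use std::sync::atomic::{AtomicBool, Ordering};

// Hypothetical stand-in for one named fail spot, e.g. `FillMissingAuxvInfo`.
static FILL_MISSING_AUXV_INFO: AtomicBool = AtomicBool::new(false);

fn try_filling_missing_info(soft_errors: &mut Vec<String>) {
    // When a test enables the spot, the error path runs even though the
    // real operation would have succeeded.
    if FILL_MISSING_AUXV_INFO.load(Ordering::Relaxed) {
        soft_errors.push("injected: invalid auxv format".into());
    }
    // ... normal auxv handling would continue here ...
}

fn main() {
    let mut soft_errors = Vec::new();
    FILL_MISSING_AUXV_INFO.store(true, Ordering::Relaxed); // what a test harness would do
    try_filling_missing_info(&mut soft_errors);
    assert_eq!(soft_errors.len(), 1);
}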
@@ -1,6 +1,8 @@
pub use reader::ProcfsAuxvIter;
use {
crate::Pid,
self::reader::ProcfsAuxvIter,
crate::{serializers::*, Pid},
error_graph::WriteErrorList,
failspot::failspot,
std::{fs::File, io::BufReader},
thiserror::Error,
};
@@ -79,7 +81,11 @@ pub struct AuxvDumpInfo {
}

impl AuxvDumpInfo {
pub fn try_filling_missing_info(&mut self, pid: Pid) -> Result<(), AuxvError> {
pub fn try_filling_missing_info(
&mut self,
pid: Pid,
mut soft_errors: impl WriteErrorList<AuxvError>,
) -> Result<(), AuxvError> {
if self.is_complete() {
return Ok(());
}
@@ -87,9 +93,14 @@ impl AuxvDumpInfo {
let auxv_path = format!("/proc/{pid}/auxv");
let auxv_file = File::open(&auxv_path).map_err(|e| AuxvError::OpenError(auxv_path, e))?;

for AuxvPair { key, value } in
ProcfsAuxvIter::new(BufReader::new(auxv_file)).filter_map(Result::ok)
{
for pair_result in ProcfsAuxvIter::new(BufReader::new(auxv_file)) {
let AuxvPair { key, value } = match pair_result {
Ok(pair) => pair,
Err(e) => {
soft_errors.push(e);
continue;
}
};
let dest_field = match key {
consts::AT_PHNUM => &mut self.program_header_count,
consts::AT_PHDR => &mut self.program_header_address,
@@ -102,6 +113,8 @@ impl AuxvDumpInfo {
}
}

failspot!(FillMissingAuxvInfo soft_errors.push(AuxvError::InvalidFormat));

Ok(())
}
pub fn get_program_header_count(&self) -> Option<AuxvType> {
@@ -124,14 +137,23 @@ impl AuxvDumpInfo {
}
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum AuxvError {
#[error("Failed to open file {0}")]
OpenError(String, #[source] std::io::Error),
OpenError(
String,
#[source]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("No auxv entry found for PID {0}")]
NoAuxvEntryFound(Pid),
#[error("Invalid auxv format (should not hit EOF before AT_NULL)")]
InvalidFormat,
#[error("IO Error")]
IOError(#[from] std::io::Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
}
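The auxv change above replaces `filter_map(Result::ok)`, which silently drops malformed pairs, with a loop that records each failure and keeps going. A small sketch of that push-and-continue shape over any fallible iterator (a plain `Vec` stands in for the `WriteErrorList` sink):

fn collect_pairs(
    pairs: impl Iterator<Item = Result<(u64, u64), String>>,
    soft_errors: &mut Vec<String>,
) -> Vec<(u64, u64)> {
    let mut out = Vec::new();
    for pair_result in pairs {
        match pair_result {
            Ok(pair) => out.push(pair),
            Err(e) => {
                // Record the problem and keep reading the remaining entries.
                soft_errors.push(e);
                continue;
            }
        }
    }
    out
}

fn main() {
    let input = vec![Ok((3, 1)), Err("truncated entry".to_string()), Ok((9, 7))];
    let mut soft_errors = Vec::new();
    let pairs = collect_pairs(input.into_iter(), &mut soft_errors);
    assert_eq!(pairs.len(), 2);
    assert_eq!(soft_errors.len(), 1);
}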
@@ -1,7 +1,11 @@
use crate::errors::CpuInfoError;
use crate::minidump_format::*;
use std::io::{BufRead, BufReader};
use std::path;
use {
crate::{errors::CpuInfoError, minidump_format::*},
failspot::failspot,
std::{
io::{BufRead, BufReader},
path,
},
};

type Result<T> = std::result::Result<T, CpuInfoError>;

@@ -44,6 +48,11 @@ pub fn write_cpu_information(sys_info: &mut MDRawSystemInfo) -> Result<()> {
MDCPUArchitecture::PROCESSOR_ARCHITECTURE_AMD64
} as u16;

failspot!(
CpuInfoFileOpen
bail(std::io::Error::other("test requested cpuinfo file failure"))
);

let cpuinfo_file = std::fs::File::open(path::PathBuf::from("/proc/cpuinfo"))?;

let mut vendor_id = String::new();
279
third_party/rust/minidump-writer/src/linux/errors.rs
vendored
@@ -1,31 +1,22 @@
use crate::{
dir_section::FileWriterError, maps_reader::MappingInfo, mem_writer::MemoryWriterError, Pid,
use {
super::{ptrace_dumper::InitError, serializers::*},
crate::{
dir_section::FileWriterError, maps_reader::MappingInfo, mem_writer::MemoryWriterError,
serializers::*, Pid,
},
error_graph::ErrorList,
std::ffi::OsString,
thiserror::Error,
};
use goblin;
use nix::errno::Errno;
use std::ffi::OsString;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum InitError {
#[error("failed to read auxv")]
ReadAuxvFailed(crate::auxv::AuxvError),
#[error("IO error for file {0}")]
IOError(String, #[source] std::io::Error),
#[error("crash thread does not reference principal mapping")]
PrincipalMappingNotReferenced,
#[error("Failed Android specific late init")]
AndroidLateInitError(#[from] AndroidError),
#[error("Failed to read the page size")]
PageSizeError(#[from] Errno),
#[error("Ptrace does not function within the same process")]
CannotPtraceSameProcess,
}

#[derive(Error, Debug)]
#[derive(Error, Debug, serde::Serialize)]
pub enum MapsReaderError {
#[error("Couldn't parse as ELF file")]
ELFParsingFailed(#[from] goblin::error::Error),
ELFParsingFailed(
#[from]
#[serde(serialize_with = "serialize_goblin_error")]
goblin::error::Error,
),
#[error("No soname found (filename: {})", .0.to_string_lossy())]
NoSoName(OsString, #[source] ModuleReaderError),

@@ -33,102 +24,168 @@ pub enum MapsReaderError {
#[error("Map entry malformed: No {0} found")]
MapEntryMalformed(&'static str),
#[error("Couldn't parse address")]
UnparsableInteger(#[from] std::num::ParseIntError),
UnparsableInteger(
#[from]
#[serde(skip)]
std::num::ParseIntError,
),
#[error("Linux gate location doesn't fit in the required integer type")]
LinuxGateNotConvertable(#[from] std::num::TryFromIntError),
LinuxGateNotConvertable(
#[from]
#[serde(skip)]
std::num::TryFromIntError,
),

// get_mmap()
#[error("Not safe to open mapping {}", .0.to_string_lossy())]
NotSafeToOpenMapping(OsString),
#[error("IO Error")]
FileError(#[from] std::io::Error),
FileError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Mmapped file empty or not an ELF file")]
MmapSanityCheckFailed,
#[error("Symlink does not match ({0} vs. {1})")]
SymlinkError(std::path::PathBuf, std::path::PathBuf),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum CpuInfoError {
#[error("IO error for file /proc/cpuinfo")]
IOError(#[from] std::io::Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Not all entries of /proc/cpuinfo found!")]
NotAllProcEntriesFound,
#[error("Couldn't parse core from file")]
UnparsableInteger(#[from] std::num::ParseIntError),
UnparsableInteger(
#[from]
#[serde(skip)]
std::num::ParseIntError,
),
#[error("Couldn't parse cores: {0}")]
UnparsableCores(String),
}

#[derive(Error, Debug)]
#[derive(Error, Debug, serde::Serialize)]
pub enum ThreadInfoError {
#[error("Index out of bounds: Got {0}, only have {1}")]
IndexOutOfBounds(usize, usize),
#[error("Either ppid ({1}) or tgid ({2}) not found in {0}")]
InvalidPid(String, Pid, Pid),
#[error("IO error")]
IOError(#[from] std::io::Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Couldn't parse address")]
UnparsableInteger(#[from] std::num::ParseIntError),
UnparsableInteger(
#[from]
#[serde(skip)]
std::num::ParseIntError,
),
#[error("nix::ptrace() error")]
PtraceError(#[from] nix::Error),
PtraceError(
#[from]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error("Invalid line in /proc/{0}/status: {1}")]
InvalidProcStatusFile(Pid, String),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum AndroidError {
#[error("Failed to copy memory from process")]
CopyFromProcessError(#[from] DumperError),
#[error("Failed slice conversion")]
TryFromSliceError(#[from] std::array::TryFromSliceError),
TryFromSliceError(
#[from]
#[serde(skip)]
std::array::TryFromSliceError,
),
#[error("No Android rel found")]
NoRelFound,
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
#[error("Copy from process {child} failed (source {src}, offset: {offset}, length: {length})")]
pub struct CopyFromProcessError {
pub child: Pid,
pub src: usize,
pub offset: usize,
pub length: usize,
#[serde(serialize_with = "serialize_nix_error")]
pub source: nix::Error,
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum DumperError {
#[error("Failed to get PAGE_SIZE from system")]
SysConfError(#[from] nix::Error),
SysConfError(
#[from]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error("wait::waitpid(Pid={0}) failed")]
WaitPidError(Pid, #[source] nix::Error),
WaitPidError(
Pid,
#[source]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error("nix::ptrace::attach(Pid={0}) failed")]
PtraceAttachError(Pid, #[source] nix::Error),
PtraceAttachError(
Pid,
#[source]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error("nix::ptrace::detach(Pid={0}) failed")]
PtraceDetachError(Pid, #[source] nix::Error),
PtraceDetachError(
Pid,
#[source]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error(transparent)]
CopyFromProcessError(#[from] CopyFromProcessError),
#[error("Skipped thread {0} due to it being part of the seccomp sandbox's trusted code")]
DetachSkippedThread(Pid),
#[error("No threads left to suspend out of {0}")]
SuspendNoThreadsLeft(usize),
#[error("No mapping for stack pointer found")]
NoStackPointerMapping,
#[error("Failed slice conversion")]
TryFromSliceError(#[from] std::array::TryFromSliceError),
TryFromSliceError(
#[from]
#[serde(skip)]
std::array::TryFromSliceError,
),
#[error("Couldn't parse as ELF file")]
ELFParsingFailed(#[from] goblin::error::Error),
ELFParsingFailed(
#[from]
#[serde(serialize_with = "serialize_goblin_error")]
goblin::error::Error,
),
#[error("Could not read value from module")]
ModuleReaderError(#[from] ModuleReaderError),
#[error("Not safe to open mapping: {}", .0.to_string_lossy())]
NotSafeToOpenMapping(OsString),
#[error("Failed integer conversion")]
TryFromIntError(#[from] std::num::TryFromIntError),
TryFromIntError(
#[from]
#[serde(skip)]
std::num::TryFromIntError,
),
#[error("Maps reader error")]
MapsReaderError(#[from] MapsReaderError),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionAppMemoryError {
#[error("Failed to copy memory from process")]
CopyFromProcessError(#[from] DumperError),
@@ -136,23 +193,31 @@ pub enum SectionAppMemoryError {
MemoryWriterError(#[from] MemoryWriterError),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionExceptionStreamError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionHandleDataStreamError {
#[error("Failed to access file")]
IOError(#[from] std::io::Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
#[error("Failed integer conversion")]
TryFromIntError(#[from] std::num::TryFromIntError),
TryFromIntError(
#[from]
#[serde(skip)]
std::num::TryFromIntError,
),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionMappingsError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
@@ -160,53 +225,75 @@ pub enum SectionMappingsError {
GetEffectivePathError(MappingInfo, #[source] MapsReaderError),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionMemInfoListError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
#[error("Failed to read from procfs")]
ProcfsError(#[from] procfs_core::ProcError),
ProcfsError(
#[from]
#[serde(serialize_with = "serialize_proc_error")]
procfs_core::ProcError,
),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionMemListError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionSystemInfoError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
#[error("Failed to get CPU Info")]
CpuInfoError(#[from] CpuInfoError),
#[error("Failed trying to write CPU information")]
WriteCpuInformationFailed(#[source] CpuInfoError),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionThreadListError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
#[error("Failed integer conversion")]
TryFromIntError(#[from] std::num::TryFromIntError),
TryFromIntError(
#[from]
#[serde(skip)]
std::num::TryFromIntError,
),
#[error("Failed to copy memory from process")]
CopyFromProcessError(#[from] DumperError),
#[error("Failed to get thread info")]
ThreadInfoError(#[from] ThreadInfoError),
#[error("Failed to write to memory buffer")]
IOError(#[from] std::io::Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionThreadNamesError {
#[error("Failed integer conversion")]
TryFromIntError(#[from] std::num::TryFromIntError),
TryFromIntError(
#[from]
#[serde(skip)]
std::num::TryFromIntError,
),
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
#[error("Failed to write to memory buffer")]
IOError(#[from] std::io::Error),
IOError(
#[from]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum SectionDsoDebugError {
#[error("Failed to write to memory")]
MemoryWriterError(#[from] MemoryWriterError),
@@ -215,10 +302,14 @@ pub enum SectionDsoDebugError {
#[error("Failed to copy memory from process")]
CopyFromProcessError(#[from] DumperError),
#[error("Failed to copy memory from process")]
FromUTF8Error(#[from] std::string::FromUtf8Error),
FromUTF8Error(
#[from]
#[serde(serialize_with = "serialize_from_utf8_error")]
std::string::FromUtf8Error,
),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum WriterError {
#[error("Error during init phase")]
InitError(#[from] InitError),
@@ -249,15 +340,60 @@ pub enum WriterError {
#[error("Failed to write to file")]
FileWriterError(#[from] FileWriterError),
#[error("Failed to get current timestamp when writing header of minidump")]
SystemTimeError(#[from] std::time::SystemTimeError),
SystemTimeError(
#[from]
#[serde(serialize_with = "serialize_system_time_error")]
std::time::SystemTimeError,
),
#[error("Errors occurred while initializing PTraceDumper")]
InitErrors(#[source] ErrorList<InitError>),
#[error("Errors occurred while suspending threads")]
SuspendThreadsErrors(#[source] ErrorList<DumperError>),
#[error("Errors occurred while resuming threads")]
ResumeThreadsErrors(#[source] ErrorList<DumperError>),
#[error("Crash thread does not reference principal mapping")]
PrincipalMappingNotReferenced,
#[error("Errors occurred while writing system info")]
WriteSystemInfoErrors(#[source] ErrorList<SectionSystemInfoError>),
#[error("Failed writing cpuinfo")]
WriteCpuInfoFailed(#[source] MemoryWriterError),
#[error("Failed writing thread proc status")]
WriteThreadProcStatusFailed(#[source] MemoryWriterError),
#[error("Failed writing OS Release Information")]
WriteOsReleaseInfoFailed(#[source] MemoryWriterError),
#[error("Failed writing process command line")]
WriteCommandLineFailed(#[source] MemoryWriterError),
#[error("Writing process environment failed")]
WriteEnvironmentFailed(#[source] MemoryWriterError),
#[error("Failed to write auxv file")]
WriteAuxvFailed(#[source] MemoryWriterError),
#[error("Failed to write maps file")]
WriteMapsFailed(#[source] MemoryWriterError),
#[error("Failed writing DSO Debug Stream")]
WriteDSODebugStreamFailed(#[source] SectionDsoDebugError),
#[error("Failed writing limits file")]
WriteLimitsFailed(#[source] MemoryWriterError),
#[error("Failed writing handle data stream")]
WriteHandleDataStreamFailed(#[source] SectionHandleDataStreamError),
#[error("Failed writing handle data stream direction entry")]
WriteHandleDataStreamDirentFailed(#[source] FileWriterError),
#[error("No threads left to suspend out of {0}")]
SuspendNoThreadsLeft(usize),
#[error("Failed to convert soft error list to JSON")]
ConvertToJsonFailed(
#[source]
#[serde(skip)]
serde_json::Error,
),
}

#[derive(Debug, Error)]
#[derive(Debug, Error, serde::Serialize)]
pub enum ModuleReaderError {
#[error("failed to read module file ({path}): {error}")]
MapFile {
path: std::path::PathBuf,
#[source]
#[serde(serialize_with = "serialize_io_error")]
error: std::io::Error,
},
#[error("failed to read module memory: {length} bytes at {offset}{}: {error}", .start_address.map(|addr| format!(" (start address: {addr})")).unwrap_or_default())]
@@ -266,10 +402,15 @@ pub enum ModuleReaderError {
length: u64,
start_address: Option<u64>,
#[source]
#[serde(serialize_with = "serialize_nix_error")]
error: nix::Error,
},
#[error("failed to parse ELF memory: {0}")]
Parsing(#[from] goblin::error::Error),
Parsing(
#[from]
#[serde(serialize_with = "serialize_goblin_error")]
goblin::error::Error,
),
#[error("no build id notes in program headers")]
NoProgramHeaderNote,
#[error("no string table available to locate note sections")]
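Most of the `#[serde(serialize_with = ...)]` attributes added above exist because the wrapped error types (std::io::Error, nix::Error, goblin::error::Error, ...) do not implement `Serialize` themselves. A plausible shape for such a helper, assuming it simply renders the error through `Display` (the crate's real `serialize_generic_error` may differ):

use serde::{Serialize, Serializer};

// Hypothetical helper: serialize any foreign error as its Display string.
fn serialize_display_error<E: std::fmt::Display, S: Serializer>(
    error: &E,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serializer.collect_str(error)
}

#[derive(Serialize)]
struct Report {
    #[serde(serialize_with = "serialize_display_error")]
    cause: std::io::Error,
}

fn main() {
    let report = Report {
        cause: std::io::Error::other("example failure"),
    };
    println!("{}", serde_json::to_string(&report).unwrap());
}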
@@ -1,19 +1,24 @@
use crate::auxv::AuxvType;
use crate::errors::MapsReaderError;
use byteorder::{NativeEndian, ReadBytesExt};
use goblin::elf;
use memmap2::{Mmap, MmapOptions};
use procfs_core::process::{MMPermissions, MMapPath, MemoryMaps};
use std::ffi::{OsStr, OsString};
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::{fs::File, mem::size_of, path::PathBuf};
use {
crate::{auxv::AuxvType, errors::MapsReaderError},
byteorder::{NativeEndian, ReadBytesExt},
goblin::elf,
memmap2::{Mmap, MmapOptions},
procfs_core::process::{MMPermissions, MMapPath, MemoryMaps},
std::{
ffi::{OsStr, OsString},
fs::File,
mem::size_of,
os::unix::ffi::{OsStrExt, OsStringExt},
path::PathBuf,
},
};

pub const LINUX_GATE_LIBRARY_NAME: &str = "linux-gate.so";
pub const DELETED_SUFFIX: &[u8] = b" (deleted)";

type Result<T> = std::result::Result<T, MapsReaderError>;

#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone, serde::Serialize)]
pub struct SystemMappingInfo {
pub start_address: usize,
pub end_address: usize,
@@ -21,7 +26,7 @@ pub struct SystemMappingInfo {

// One of these is produced for each mapping in the process (i.e. line in
// /proc/$x/maps).
#[derive(Debug, PartialEq, Eq, Clone)]
#[derive(Debug, PartialEq, Eq, Clone, serde::Serialize)]
pub struct MappingInfo {
// On Android, relocation packing can mean that the reported start
// address of the mapping must be adjusted by a bias in order to
@@ -88,7 +93,10 @@ impl MappingInfo {
self.start_address + self.size
}

pub fn aggregate(memory_maps: MemoryMaps, linux_gate_loc: AuxvType) -> Result<Vec<Self>> {
pub fn aggregate(
memory_maps: MemoryMaps,
linux_gate_loc: Option<AuxvType>,
) -> Result<Vec<Self>> {
let mut infos = Vec::<Self>::new();

for mm in memory_maps {
@@ -112,10 +120,12 @@ impl MappingInfo {

let is_path = is_mapping_a_path(pathname.as_deref());

if !is_path && linux_gate_loc != 0 && start_address == linux_gate_loc.try_into()? {
if let Some(linux_gate_loc) = linux_gate_loc.map(|u| usize::try_from(u).unwrap()) {
if !is_path && start_address == linux_gate_loc {
pathname = Some(LINUX_GATE_LIBRARY_NAME.into());
offset = 0;
}
}

if let Some(prev_module) = infos.last_mut() {
if (start_address == prev_module.end_address())
@@ -451,7 +461,7 @@ mod tests {
fn get_mappings_for(map: &str, linux_gate_loc: u64) -> Vec<MappingInfo> {
MappingInfo::aggregate(
MemoryMaps::from_read(map.as_bytes()).expect("failed to read mapping info"),
linux_gate_loc,
Some(linux_gate_loc),
)
.unwrap_or_default()
}
@@ -258,7 +258,7 @@ impl PtraceDumper {
src: usize,
length: usize,
) -> Result<Vec<u8>, crate::errors::DumperError> {
let length = std::num::NonZeroUsize::new(length).ok_or_else(|| {
let length = std::num::NonZeroUsize::new(length).ok_or(
crate::errors::DumperError::CopyFromProcessError(CopyFromProcessError {
src,
child: pid,
@@ -268,8 +268,8 @@ impl PtraceDumper {
// as EINVAL could also come from the syscalls that actually read
// memory as well which could be confusing
source: nix::errno::Errno::EINVAL,
})
})?;
}),
)?;

let mut mem = MemReader::new(pid);
Ok(mem.read_to_vec(src, length)?)
@@ -1,12 +1,13 @@
pub use crate::linux::auxv::{AuxvType, DirectAuxvDumpInfo};
use crate::{
use {
crate::{
auxv::AuxvDumpInfo,
dir_section::{DirSection, DumpBuf},
linux::{
app_memory::AppMemoryList,
crash_context::CrashContext,
dso_debug,
errors::{InitError, WriterError},
errors::WriterError,
maps_reader::{MappingInfo, MappingList},
ptrace_dumper::PtraceDumper,
sections::*,
@@ -14,10 +15,12 @@ use crate::{
mem_writer::{Buffer, MemoryArrayWriter, MemoryWriter, MemoryWriterError},
minidump_format::*,
Pid,
};
use std::{
},
error_graph::{ErrorList, WriteErrorList},
std::{
io::{Seek, Write},
time::Duration,
},
};

pub enum CrashingThreadContext {
@@ -144,8 +147,24 @@ impl MinidumpWriter {
.clone()
.map(AuxvDumpInfo::from)
.unwrap_or_default();
let mut dumper = PtraceDumper::new(self.process_id, self.stop_timeout, auxv)?;
dumper.suspend_threads()?;

let mut soft_errors = ErrorList::default();

let mut dumper = PtraceDumper::new_report_soft_errors(
self.process_id,
self.stop_timeout,
auxv,
soft_errors.subwriter(WriterError::InitErrors),
)?;

let threads_count = dumper.threads.len();

dumper.suspend_threads(soft_errors.subwriter(WriterError::SuspendThreadsErrors));

if dumper.threads.is_empty() {
soft_errors.push(WriterError::SuspendNoThreadsLeft(threads_count));
}

dumper.late_init()?;

if self.skip_stacks_if_mapping_unreferenced {
@@ -154,16 +173,12 @@ impl MinidumpWriter {
}

if !self.crash_thread_references_principal_mapping(&dumper) {
return Err(InitError::PrincipalMappingNotReferenced.into());
soft_errors.push(WriterError::PrincipalMappingNotReferenced);
}
}

let mut buffer = Buffer::with_capacity(0);
self.generate_dump(&mut buffer, &mut dumper, destination)?;

// dumper would resume threads in drop() automatically,
// but in case there is an error, we want to catch it
dumper.resume_threads()?;
self.generate_dump(&mut buffer, &mut dumper, soft_errors, destination)?;

Ok(buffer.into())
}
@@ -226,11 +241,12 @@ impl MinidumpWriter {
&mut self,
buffer: &mut DumpBuf,
dumper: &mut PtraceDumper,
mut soft_errors: ErrorList<WriterError>,
destination: &mut (impl Write + Seek),
) -> Result<()> {
// A minidump file contains a number of tagged streams. This is the number
// of streams which we write.
let num_writers = 17u32;
let num_writers = 18u32;

let mut header_section = MemoryWriter::<MDRawHeader>::alloc(buffer)?;

@@ -270,7 +286,10 @@ impl MinidumpWriter {
let dirent = exception_stream::write(self, buffer)?;
dir_section.write_to_file(buffer, Some(dirent))?;

let dirent = systeminfo_stream::write(buffer)?;
let dirent = systeminfo_stream::write(
buffer,
soft_errors.subwriter(WriterError::WriteSystemInfoErrors),
)?;
dir_section.write_to_file(buffer, Some(dirent))?;

let dirent = memory_info_list_stream::write(self, buffer)?;
@@ -281,7 +300,10 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxCpuInfo as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteCpuInfoFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

@@ -291,7 +313,10 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxProcStatus as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteThreadProcStatusFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

@@ -303,7 +328,10 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxLsbRelease as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteOsReleaseInfoFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

@@ -313,7 +341,10 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxCmdLine as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteCommandLineFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

@@ -323,7 +354,10 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxEnviron as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteEnvironmentFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

@@ -332,7 +366,10 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxAuxv as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteAuxvFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

@@ -341,12 +378,21 @@ impl MinidumpWriter {
stream_type: MDStreamType::LinuxMaps as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteMapsFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

let dirent = dso_debug::write_dso_debug_stream(buffer, self.process_id, &dumper.auxv)
.unwrap_or_default();
let dirent = match dso_debug::write_dso_debug_stream(buffer, self.process_id, &dumper.auxv)
{
Ok(dirent) => dirent,
Err(e) => {
soft_errors.push(WriterError::WriteDSODebugStreamFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

let dirent = match self.write_file(buffer, &format!("/proc/{}/limits", self.blamed_thread))
@@ -355,17 +401,43 @@ impl MinidumpWriter {
stream_type: MDStreamType::MozLinuxLimits as u32,
location,
},
Err(_) => Default::default(),
Err(e) => {
soft_errors.push(WriterError::WriteLimitsFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

let dirent = thread_names_stream::write(buffer, dumper)?;
dir_section.write_to_file(buffer, Some(dirent))?;

// This section is optional, so we ignore errors when writing it
if let Ok(dirent) = handle_data_stream::write(self, buffer) {
let _ = dir_section.write_to_file(buffer, Some(dirent));
let dirent = match handle_data_stream::write(self, buffer) {
Ok(dirent) => dirent,
Err(e) => {
soft_errors.push(WriterError::WriteHandleDataStreamFailed(e));
Default::default()
}
};
dir_section.write_to_file(buffer, Some(dirent))?;

// ========================================================================================
//
// PAST THIS BANNER, THE THREADS ARE RUNNING IN THE TARGET PROCESS AGAIN. IF YOU NEED TO
// ADD NEW ENTRIES THAT ACCESS THE TARGET MEMORY, DO IT BEFORE HERE!
//
// ========================================================================================

// Collect any last-minute soft errors when trying to restart threads
dumper.resume_threads(soft_errors.subwriter(WriterError::ResumeThreadsErrors));

// If this fails, there's really nothing we can do about that (other than ignore it).
let dirent = write_soft_errors(buffer, soft_errors)
.map(|location| MDRawDirectory {
stream_type: MDStreamType::MozSoftErrors as u32,
location,
})
.unwrap_or_default();
dir_section.write_to_file(buffer, Some(dirent))?;

// If you add more directory entries, don't forget to update num_writers, above.
Ok(())
@@ -383,3 +455,13 @@ impl MinidumpWriter {
Ok(section.location())
}
}

fn write_soft_errors(
buffer: &mut DumpBuf,
soft_errors: ErrorList<WriterError>,
) -> Result<MDLocationDescriptor> {
let soft_errors_json_str =
serde_json::to_string_pretty(&soft_errors).map_err(WriterError::ConvertToJsonFailed)?;
let section = MemoryArrayWriter::write_bytes(buffer, soft_errors_json_str.as_bytes());
Ok(section.location())
}
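write_soft_errors above turns whatever soft errors were accumulated during the dump into a pretty-printed JSON blob and stores it as the MozSoftErrors stream, so a consumer of the minidump can see what was only partially captured. A toy illustration of that serialization step, using plain strings instead of the real `ErrorList<WriterError>` type:

fn soft_errors_to_json(soft_errors: &[String]) -> serde_json::Result<Vec<u8>> {
    // The real code serializes ErrorList<WriterError>; strings keep the sketch small.
    let json = serde_json::to_string_pretty(soft_errors)?;
    Ok(json.into_bytes())
}

fn main() {
    let soft_errors = vec![
        "Failed writing cpuinfo".to_string(),
        "Errors occurred while resuming threads".to_string(),
    ];
    let bytes = soft_errors_to_json(&soft_errors).expect("serializing strings cannot fail");
    println!("{} bytes of MozSoftErrors payload", bytes.len());
}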
@@ -36,7 +36,7 @@ impl<'buf> From<&'buf [u8]> for ProcessMemory<'buf> {
}
}

impl<'buf> From<ProcessReader> for ProcessMemory<'buf> {
impl From<ProcessReader> for ProcessMemory<'_> {
fn from(value: ProcessReader) -> Self {
Self::Process(value)
}
@@ -207,7 +207,7 @@ impl<'a> DynIter<'a> {
}
}

impl<'a> Iterator for DynIter<'a> {
impl Iterator for DynIter<'_> {
type Item = Result<elf::dynamic::Dyn, Error>;

fn next(&mut self) -> Option<Self::Item> {
@@ -427,9 +427,9 @@ impl<'buf> ModuleReader<'buf> {
let name = self
.module_memory
.read(strtab_offset + name_offset, strtab_size - name_offset)?;
return CStr::from_bytes_until_nul(&name)
CStr::from_bytes_until_nul(&name)
.map(|s| s.to_string_lossy().into_owned())
.map_err(|_| Error::StrTabNoNulByte);
.map_err(|_| Error::StrTabNoNulByte)
}

fn section_offset(&self, header: &elf::SectionHeader) -> u64 {
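The module_reader changes above are purely stylistic: an impl lifetime that is never named becomes the anonymous `'_` lifetime, and a trailing `return ...;` becomes the block's final expression. A minimal standalone example of the same cleanup:

struct Bytes<'buf>(&'buf [u8]);

// The impl doesn't need to name the lifetime, so `'_` is used.
impl Iterator for Bytes<'_> {
    type Item = u8;
    fn next(&mut self) -> Option<u8> {
        let (first, rest) = self.0.split_first()?;
        self.0 = rest;
        // Final expression instead of an explicit `return ...;`.
        Some(*first)
    }
}

fn main() {
    let collected: Vec<u8> = Bytes(b"abc").collect();
    assert_eq!(collected, vec![97, 98, 99]);
}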
@@ -1,29 +1,44 @@
#[cfg(target_os = "android")]
use crate::linux::android::late_process_mappings;
use crate::linux::{
use {
super::{
auxv::AuxvError,
errors::{AndroidError, MapsReaderError},
serializers::*,
},
crate::{
linux::{
auxv::AuxvDumpInfo,
errors::{DumperError, InitError, ThreadInfoError},
errors::{DumperError, ThreadInfoError},
maps_reader::MappingInfo,
module_reader,
thread_info::ThreadInfo,
Pid,
};
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::thread_info;
use nix::{
},
serializers::*,
},
error_graph::{ErrorList, WriteErrorList},
failspot::failspot,
nix::{
errno::Errno,
sys::{ptrace, signal, wait},
};
use procfs_core::{
},
procfs_core::{
process::{MMPermissions, ProcState, Stat},
FromRead, ProcError,
};
use std::{
},
std::{
ffi::OsString,
path,
result::Result,
time::{Duration, Instant},
},
thiserror::Error,
};

#[cfg(target_os = "android")]
use crate::linux::android::late_process_mappings;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use crate::thread_info;

#[derive(Debug, Clone)]
pub struct Thread {
pub tid: Pid,
@@ -48,24 +63,91 @@ pub const AT_SYSINFO_EHDR: u64 = 33;
impl Drop for PtraceDumper {
fn drop(&mut self) {
// Always try to resume all threads (e.g. in case of error)
let _ = self.resume_threads();
self.resume_threads(error_graph::strategy::DontCare);
// Always allow the process to continue.
let _ = self.continue_process();
}
}

#[derive(Debug, thiserror::Error)]
enum StopProcessError {
#[derive(Debug, Error, serde::Serialize)]
pub enum InitError {
#[error("failed to read auxv")]
ReadAuxvFailed(#[source] crate::auxv::AuxvError),
#[error("IO error for file {0}")]
IOError(
String,
#[source]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Failed Android specific late init")]
AndroidLateInitError(#[from] AndroidError),
#[error("Failed to read the page size")]
PageSizeError(
#[from]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error("Ptrace does not function within the same process")]
CannotPtraceSameProcess,
#[error("Failed to stop the target process")]
StopProcessFailed(#[source] StopProcessError),
#[error("Errors occurred while filling missing Auxv info")]
FillMissingAuxvInfoErrors(#[source] ErrorList<AuxvError>),
#[error("Failed filling missing Auxv info")]
FillMissingAuxvInfoFailed(#[source] AuxvError),
#[error("Failed reading proc/pid/task entry for process")]
ReadProcessThreadEntryFailed(
#[source]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Process task entry `{0:?}` could not be parsed as a TID")]
ProcessTaskEntryNotTid(OsString),
#[error("Failed to read thread name")]
ReadThreadNameFailed(
#[source]
#[serde(serialize_with = "serialize_io_error")]
std::io::Error,
),
#[error("Proc task directory `{0:?}` is not a directory")]
ProcPidTaskNotDirectory(String),
#[error("Errors while enumerating threads")]
EnumerateThreadsErrors(#[source] ErrorList<InitError>),
#[error("Failed to enumerate threads")]
EnumerateThreadsFailed(#[source] Box<InitError>),
#[error("Failed to read process map file")]
ReadProcessMapFileFailed(
#[source]
#[serde(serialize_with = "serialize_proc_error")]
ProcError,
),
#[error("Failed to aggregate process mappings")]
AggregateMappingsFailed(#[source] MapsReaderError),
#[error("Failed to enumerate process mappings")]
EnumerateMappingsFailed(#[source] Box<InitError>),
}

#[derive(Debug, thiserror::Error, serde::Serialize)]
pub enum StopProcessError {
#[error("Failed to stop the process")]
Stop(#[from] Errno),
Stop(
#[from]
#[serde(serialize_with = "serialize_nix_error")]
nix::Error,
),
#[error("Failed to get the process state")]
State(#[from] ProcError),
State(
#[from]
#[serde(serialize_with = "serialize_proc_error")]
ProcError,
),
#[error("Timeout waiting for process to stop")]
Timeout,
}

#[derive(Debug, thiserror::Error)]
enum ContinueProcessError {
pub enum ContinueProcessError {
#[error("Failed to continue the process")]
Continue(#[from] Errno),
}
@@ -88,8 +170,13 @@ fn ptrace_detach(child: Pid) -> Result<(), DumperError> {

impl PtraceDumper {
/// Constructs a dumper for extracting information from the specified process id
pub fn new(pid: Pid, stop_timeout: Duration, auxv: AuxvDumpInfo) -> Result<Self, InitError> {
if pid == std::process::id() as _ {
pub fn new_report_soft_errors(
pid: Pid,
stop_timeout: Duration,
auxv: AuxvDumpInfo,
soft_errors: impl WriteErrorList<InitError>,
) -> Result<Self, InitError> {
if pid == std::process::id() as i32 {
return Err(InitError::CannotPtraceSameProcess);
}

@@ -101,23 +188,43 @@ impl PtraceDumper {
mappings: Vec::new(),
page_size: 0,
};
dumper.init(stop_timeout)?;
dumper.init(stop_timeout, soft_errors)?;
Ok(dumper)
}

// TODO: late_init for chromeos and android
pub fn init(&mut self, stop_timeout: Duration) -> Result<(), InitError> {
pub fn init(
&mut self,
stop_timeout: Duration,
mut soft_errors: impl WriteErrorList<InitError>,
) -> Result<(), InitError> {
// Stopping the process is best-effort.
if let Err(e) = self.stop_process(stop_timeout) {
log::warn!("failed to stop process {}: {e}", self.pid);
soft_errors.push(InitError::StopProcessFailed(e));
}

if let Err(e) = self.auxv.try_filling_missing_info(self.pid) {
log::warn!("failed trying to fill in missing auxv info: {e}");
// Even if we completely fail to fill in any additional Auxv info, we can still press
// forward.
if let Err(e) = self.auxv.try_filling_missing_info(
self.pid,
soft_errors.subwriter(InitError::FillMissingAuxvInfoErrors),
) {
soft_errors.push(InitError::FillMissingAuxvInfoFailed(e));
}

// If we completely fail to enumerate any threads... Some information is still better than
// no information!
if let Err(e) =
self.enumerate_threads(soft_errors.subwriter(InitError::EnumerateThreadsErrors))
{
soft_errors.push(InitError::EnumerateThreadsFailed(Box::new(e)));
}

// Same with mappings -- Some information is still better than no information!
if let Err(e) = self.enumerate_mappings() {
soft_errors.push(InitError::EnumerateMappingsFailed(Box::new(e)));
}

self.enumerate_threads()?;
self.enumerate_mappings()?;
self.page_size = nix::unistd::sysconf(nix::unistd::SysconfVar::PAGE_SIZE)?
.expect("page size apparently unlimited: doesn't make sense.")
as usize;
@@ -207,42 +314,44 @@ impl PtraceDumper {
ptrace_detach(child)
}

pub fn suspend_threads(&mut self) -> Result<(), DumperError> {
let threads_count = self.threads.len();
pub fn suspend_threads(&mut self, mut soft_errors: impl WriteErrorList<DumperError>) {
// Iterate over all threads and try to suspend them.
// If the thread either disappeared before we could attach to it, or if
// it was part of the seccomp sandbox's trusted code, it is OK to
// silently drop it from the minidump.
self.threads.retain(|x| Self::suspend_thread(x.tid).is_ok());
self.threads.retain(|x| match Self::suspend_thread(x.tid) {
Ok(()) => true,
Err(e) => {
soft_errors.push(e);
false
}
});

if self.threads.is_empty() {
Err(DumperError::SuspendNoThreadsLeft(threads_count))
} else {
self.threads_suspended = true;
Ok(())
}

failspot::failspot!(<crate::FailSpotName>::SuspendThreads soft_errors.push(DumperError::PtraceAttachError(1234, nix::Error::EPERM)))
}

pub fn resume_threads(&mut self) -> Result<(), DumperError> {
let mut result = Ok(());
pub fn resume_threads(&mut self, mut soft_errors: impl WriteErrorList<DumperError>) {
if self.threads_suspended {
for thread in &self.threads {
match Self::resume_thread(thread.tid) {
Ok(_) => {}
x => {
result = x;
Ok(()) => (),
Err(e) => {
soft_errors.push(e);
}
}
}
}
self.threads_suspended = false;
result
}

/// Send SIGSTOP to the process so that we can get a consistent state.
///
/// This will block waiting for the process to stop until `timeout` has passed.
fn stop_process(&mut self, timeout: Duration) -> Result<(), StopProcessError> {
failspot!(StopProcess bail(nix::Error::EPERM));

signal::kill(nix::unistd::Pid::from_raw(self.pid), Some(signal::SIGSTOP))?;

// Something like waitpid for non-child processes would be better, but we have no such
@@ -273,30 +382,54 @@ impl PtraceDumper {

/// Parse /proc/$pid/task to list all the threads of the process identified by
/// pid.
fn enumerate_threads(&mut self) -> Result<(), InitError> {
fn enumerate_threads(
&mut self,
mut soft_errors: impl WriteErrorList<InitError>,
) -> Result<(), InitError> {
let pid = self.pid;
let filename = format!("/proc/{}/task", pid);
let task_path = path::PathBuf::from(&filename);
if task_path.is_dir() {
std::fs::read_dir(task_path)
.map_err(|e| InitError::IOError(filename, e))?
.filter_map(|entry| entry.ok()) // Filter out bad entries
.filter_map(|entry| {
entry
.file_name() // Parse name to Pid, filter out those that are unparsable
.to_str()
.and_then(|name| name.parse::<Pid>().ok())
})
.map(|tid| {
// Read the thread-name (if there is any)
let name = std::fs::read_to_string(format!("/proc/{}/task/{}/comm", pid, tid))
// NOTE: This is a bit wasteful as it does two allocations in order to trim, but leaving it for now
.map(|s| s.trim_end().to_string())
.ok();
(tid, name)
})
.for_each(|(tid, name)| self.threads.push(Thread { tid, name }));
if !task_path.is_dir() {
return Err(InitError::ProcPidTaskNotDirectory(filename));
}

for entry in std::fs::read_dir(task_path).map_err(|e| InitError::IOError(filename, e))? {
let entry = match entry {
Ok(entry) => entry,
Err(e) => {
soft_errors.push(InitError::ReadProcessThreadEntryFailed(e));
continue;
}
};
let file_name = entry.file_name();
let tid = match file_name.to_str().and_then(|name| name.parse::<Pid>().ok()) {
Some(tid) => tid,
None => {
soft_errors.push(InitError::ProcessTaskEntryNotTid(file_name));
continue;
}
};

// Read the thread-name (if there is any)
let name_result = failspot!(if ThreadName {
Err(std::io::Error::other(
"testing requested failure reading thread name",
))
} else {
std::fs::read_to_string(format!("/proc/{}/task/{}/comm", pid, tid))
});

let name = match name_result {
Ok(name) => Some(name.trim_end().to_string()),
Err(e) => {
soft_errors.push(InitError::ReadThreadNameFailed(e));
None
}
};

self.threads.push(Thread { tid, name });
}

Ok(())
}

@@ -308,39 +441,34 @@ impl PtraceDumper {
// case its entry when creating the list of mappings.
// See http://www.trilithium.com/johan/2005/08/linux-gate/ for more
// information.
let linux_gate_loc = self.auxv.get_linux_gate_address().unwrap_or_default();
let maps_path = format!("/proc/{}/maps", self.pid);
let maps_file =
std::fs::File::open(&maps_path).map_err(|e| InitError::IOError(maps_path, e))?;

let maps = procfs_core::process::MemoryMaps::from_read(maps_file)
.map_err(InitError::ReadProcessMapFileFailed)?;

self.mappings = MappingInfo::aggregate(maps, self.auxv.get_linux_gate_address())
.map_err(InitError::AggregateMappingsFailed)?;

// Although the initial executable is usually the first mapping, it's not
// guaranteed (see http://crosbug.com/25355); therefore, try to use the
// actual entry point to find the mapping.
let entry_point_loc = self.auxv.get_entry_address().unwrap_or_default();
let filename = format!("/proc/{}/maps", self.pid);
let errmap = |e| InitError::IOError(filename.clone(), e);
let maps_path = path::PathBuf::from(&filename);
let maps_file = std::fs::File::open(maps_path).map_err(errmap)?;

use procfs_core::FromRead;
self.mappings = procfs_core::process::MemoryMaps::from_read(maps_file)
.ok()
.and_then(|maps| MappingInfo::aggregate(maps, linux_gate_loc).ok())
.unwrap_or_default();

if entry_point_loc != 0 {
let mut swap_idx = None;
for (idx, module) in self.mappings.iter().enumerate() {
if let Some(entry_point_loc) = self
.auxv
.get_entry_address()
.map(|u| usize::try_from(u).unwrap())
{
// If this module contains the entry-point, and it's not already the first
// one, then we need to make it be first. This is because the minidump
// format assumes the first module is the one that corresponds to the main
// executable (as codified in
// processor/minidump.cc:MinidumpModuleList::GetMainModule()).
if entry_point_loc >= module.start_address.try_into().unwrap()
&& entry_point_loc < (module.start_address + module.size).try_into().unwrap()
{
swap_idx = Some(idx);
break;
}
}
if let Some(idx) = swap_idx {
self.mappings.swap(0, idx);
if let Some(entry_mapping_idx) = self.mappings.iter().position(|mapping| {
(mapping.start_address..mapping.start_address + mapping.size)
.contains(&entry_point_loc)
}) {
self.mappings.swap(0, entry_mapping_idx);
}
}
Ok(())
|
||||
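The second half of the hunk keeps the existing rule that the mapping containing the process entry point is moved to index 0, because minidump consumers treat the first module as the main executable. A small sketch of that swap, with a hypothetical `Mapping` struct standing in for `MappingInfo`:

```rust
/// Hypothetical stand-in for MappingInfo with only the fields the swap needs.
struct Mapping {
    start_address: usize,
    size: usize,
}

/// Move the mapping that contains `entry_point` to index 0, mirroring the logic above.
fn promote_main_module(mappings: &mut [Mapping], entry_point: usize) {
    if let Some(idx) = mappings
        .iter()
        .position(|m| (m.start_address..m.start_address + m.size).contains(&entry_point))
    {
        mappings.swap(0, idx);
    }
}

fn main() {
    let mut mappings = vec![
        Mapping { start_address: 0x7f00_0000, size: 0x1000 }, // some shared library
        Mapping { start_address: 0x40_0000, size: 0x2000 },   // main executable
    ];
    promote_main_module(&mut mappings, 0x40_0abc);
    assert_eq!(mappings[0].start_address, 0x40_0000);
}
```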
@@ -1,7 +1,12 @@
use super::*;
use crate::linux::dumper_cpu_info as dci;
use {
    super::*, crate::linux::dumper_cpu_info as dci, error_graph::WriteErrorList,
    errors::SectionSystemInfoError,
};

pub fn write(buffer: &mut DumpBuf) -> Result<MDRawDirectory, errors::SectionSystemInfoError> {
pub fn write(
    buffer: &mut DumpBuf,
    mut soft_errors: impl WriteErrorList<SectionSystemInfoError>,
) -> Result<MDRawDirectory, SectionSystemInfoError> {
    let mut info_section = MemoryWriter::<MDRawSystemInfo>::alloc(buffer)?;
    let dirent = MDRawDirectory {
        stream_type: MDStreamType::SystemInfoStream as u32,
@@ -16,7 +21,9 @@ pub fn write(buffer: &mut DumpBuf) -> Result<MDRawDirectory, errors::SectionSyst
    info.platform_id = platform_id as u32;
    info.csd_version_rva = os_version_loc.rva;

    dci::write_cpu_information(&mut info)?;
    if let Err(e) = dci::write_cpu_information(&mut info) {
        soft_errors.push(SectionSystemInfoError::WriteCpuInformationFailed(e));
    }

    info_section.set_value(buffer, info)?;
    Ok(dirent)
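The same downgrade shows up when writing the system-info stream: a CPU-information failure that used to propagate with `?` is now recorded and the stream is still emitted. A generic helper expressing that step (a sketch; the vendored code pushes typed `SectionSystemInfoError` values directly rather than going through a helper like this):

```rust
/// Record a failure as a soft error and carry on, yielding None instead of bailing out.
fn soft<T, E>(result: Result<T, E>, soft_errors: &mut Vec<E>) -> Option<T> {
    match result {
        Ok(value) => Some(value),
        Err(e) => {
            soft_errors.push(e);
            None
        }
    }
}

fn main() {
    let mut soft_errors: Vec<std::num::ParseIntError> = Vec::new();
    // The second parse fails, but the first value is still produced and work continues.
    let values: Vec<u32> = ["42", "not-a-number"]
        .iter()
        .filter_map(|s| soft(s.parse::<u32>(), &mut soft_errors))
        .collect();
    assert_eq!(values, vec![42]);
    assert_eq!(soft_errors.len(), 1);
}
```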
third_party/rust/minidump-writer/src/linux/serializers.rs (new vendored file, 40 lines)
@@ -0,0 +1,40 @@
//! Functions used by Serde to serialize types that we don't own (and thus can't implement
//! [Serialize] for)

use {crate::serializers::*, serde::Serializer};

/// Serialize [goblin::error::Error]
pub fn serialize_goblin_error<S: Serializer>(
    error: &goblin::error::Error,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
/// Serialize [nix::Error]
pub fn serialize_nix_error<S: Serializer>(
    error: &nix::Error,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
/// Serialize [procfs_core::ProcError]
pub fn serialize_proc_error<S: Serializer>(
    error: &procfs_core::ProcError,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
/// Serialize [std::string::FromUtf8Error]
pub fn serialize_from_utf8_error<S: Serializer>(
    error: &std::string::FromUtf8Error,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
/// Serialize [std::time::SystemTimeError]
pub fn serialize_system_time_error<S: Serializer>(
    error: &std::time::SystemTimeError,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
@@ -1,14 +1,31 @@
use crate::minidump_format::{MDLocationDescriptor, MDRVA};
use scroll::ctx::{SizeWith, TryIntoCtx};
use {
    crate::{
        minidump_format::{MDLocationDescriptor, MDRVA},
        serializers::*,
    },
    scroll::ctx::{SizeWith, TryIntoCtx},
};

#[derive(Debug, thiserror::Error)]
#[derive(Debug, thiserror::Error, serde::Serialize)]
pub enum MemoryWriterError {
    #[error("IO error when writing to DumpBuf")]
    IOError(#[from] std::io::Error),
    IOError(
        #[from]
        #[serde(serialize_with = "serialize_io_error")]
        std::io::Error,
    ),
    #[error("Failed integer conversion")]
    TryFromIntError(#[from] std::num::TryFromIntError),
    TryFromIntError(
        #[from]
        #[serde(skip)]
        std::num::TryFromIntError,
    ),
    #[error("Failed to write to buffer")]
    Scroll(#[from] scroll::Error),
    Scroll(
        #[from]
        #[serde(serialize_with = "serialize_scroll_error")]
        scroll::Error,
    ),
}

type WriteResult<T> = std::result::Result<T, MemoryWriterError>;
third_party/rust/minidump-writer/src/serializers.rs (new vendored file, 30 lines)
@@ -0,0 +1,30 @@
//! Functions used by Serde to serialize types that we don't own (and thus can't implement
//! [Serialize] for)

use serde::Serializer;
/// Useful for types that implement [Error][std::error::Error] and don't need any special
/// treatment.
pub fn serialize_generic_error<S: Serializer, E: std::error::Error>(
    error: &E,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    // I guess we'll have to see if it's more useful to store the debug representation of a
    // foreign error type or something else (like maybe iterating its error chain into a
    // list?)
    let dbg = format!("{error:#?}");
    serializer.serialize_str(&dbg)
}
/// Serialize [std::io::Error]
pub fn serialize_io_error<S: Serializer>(
    error: &std::io::Error,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
/// Serialize [scroll::Error]
pub fn serialize_scroll_error<S: Serializer>(
    error: &scroll::Error,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    serialize_generic_error(error, serializer)
}
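Because `serialize_generic_error` stores the `{:#?}` debug rendering of the foreign error as a string, a wrapped error serializes as an externally tagged JSON object whose payload is that debug string, which is the shape the new soft-error tests assert against further down. A small sketch (assumes `serde` with the `derive` feature plus `serde_json`; `DemoError` is a made-up enum, not one of the writer's real error types):

```rust
use serde::Serializer;

fn serialize_io_error<S: Serializer>(
    error: &std::io::Error,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    // Same idea as serialize_generic_error above: store the debug rendering.
    serializer.serialize_str(&format!("{error:#?}"))
}

#[derive(serde::Serialize)]
enum DemoError {
    IOError(#[serde(serialize_with = "serialize_io_error")] std::io::Error),
}

fn main() {
    let err = DemoError::IOError(std::io::Error::other("boom"));
    // Prints something like: {"IOError":"Custom {\n    kind: Other,\n    error: \"boom\",\n}"}
    println!("{}", serde_json::to_string(&err).unwrap());
}
```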
@@ -94,3 +94,38 @@ pub fn start_child_and_return(args: &[&str]) -> Child {
        .spawn()
        .expect("failed to execute child")
}

#[allow(unused)]
pub fn read_minidump_soft_errors_or_panic<'a, T>(
    dump: &minidump::Minidump<'a, T>,
) -> serde_json::Value
where
    T: std::ops::Deref<Target = [u8]> + 'a,
{
    let contents = std::str::from_utf8(
        dump.get_raw_stream(minidump_common::format::MINIDUMP_STREAM_TYPE::MozSoftErrors.into())
            .expect("missing soft error stream"),
    )
    .expect("expected utf-8 stream");

    serde_json::from_str(contents).expect("expected json")
}

#[allow(unused)]
pub fn assert_soft_errors_in_minidump<'a, 'b, T, I>(
    dump: &minidump::Minidump<'a, T>,
    expected_errors: I,
) where
    T: std::ops::Deref<Target = [u8]> + 'a,
    I: IntoIterator<Item = &'b serde_json::Value>,
{
    let actual_json = read_minidump_soft_errors_or_panic(dump);
    let actual_errors = actual_json.as_array().unwrap();

    // Ensure that every error we expect is in the actual list somewhere
    for expected_error in expected_errors {
        assert!(actual_errors
            .iter()
            .any(|actual_error| actual_error == expected_error));
    }
}
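Note that `assert_soft_errors_in_minidump` checks containment rather than equality: every expected value must appear somewhere in the stream's JSON array, but the dump is free to report additional soft errors. The same check reduced to plain `serde_json` values (a sketch with made-up inputs):

```rust
use serde_json::{json, Value};

/// Containment check used by the helper above: each expected entry must occur in `actual`.
fn all_present(actual: &[Value], expected: &[Value]) -> bool {
    expected.iter().all(|e| actual.iter().any(|a| a == e))
}

fn main() {
    let actual = vec![json!("PrincipalMappingNotReferenced"), json!({"SomethingElse": 1})];
    let expected = vec![json!("PrincipalMappingNotReferenced")];
    assert!(all_present(&actual, &expected));
}
```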
@@ -1,9 +1,11 @@
#![cfg(any(target_os = "linux", target_os = "android"))]
#![allow(unused_imports, unused_variables)]

use minidump::*;
use minidump_common::format::{GUID, MINIDUMP_STREAM_TYPE::*};
use minidump_writer::{
use {
    common::*,
    minidump::*,
    minidump_common::format::{GUID, MINIDUMP_STREAM_TYPE::*},
    minidump_writer::{
        app_memory::AppMemory,
        crash_context::CrashContext,
        errors::*,
@@ -12,19 +14,19 @@ use minidump_writer::{
        module_reader::{BuildId, ReadFromModule},
        ptrace_dumper::PtraceDumper,
        Pid,
};
use nix::{errno::Errno, sys::signal::Signal};
use procfs_core::process::MMPermissions;
use std::collections::HashSet;

use std::{
    },
    nix::{errno::Errno, sys::signal::Signal},
    procfs_core::process::MMPermissions,
    serde_json::json,
    std::{
        collections::HashSet,
        io::{BufRead, BufReader},
        os::unix::process::ExitStatusExt,
        process::{Command, Stdio},
    },
};

mod common;
use common::*;

#[derive(Debug, PartialEq)]
enum Context {
@@ -299,6 +301,10 @@ contextual_test! {

contextual_test! {
    fn skip_if_requested(context: Context) {
        let expected_errors = vec![
            json!("PrincipalMappingNotReferenced"),
        ];

        let num_of_threads = 1;
        let mut child = start_child_and_wait_for_threads(num_of_threads);
        let pid = child.id() as i32;
@@ -331,7 +337,9 @@ contextual_test! {
        assert_eq!(waitres.code(), None);
        assert_eq!(status, Signal::SIGKILL as i32);

        assert!(res.is_err());
        // Ensure the MozSoftErrors stream contains the expected errors
        let dump = Minidump::read_path(tmpfile.path()).expect("failed to read minidump");
        assert_soft_errors_in_minidump(&dump, &expected_errors);
    }
}

@@ -790,6 +798,7 @@ fn memory_info_list_stream() {
        .dump(&mut tmpfile)
        .expect("cound not write minidump");
    child.kill().expect("Failed to kill process");
    child.wait().expect("Failed to wait on killed process");

    // Ensure the minidump has a MemoryInfoListStream present and has at least one entry.
    let dump = Minidump::read_path(tmpfile.path()).expect("failed to read minidump");
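The expectation `json!("PrincipalMappingNotReferenced")` is a bare string because serde's default externally tagged representation renders a data-less enum variant as just its name, while variants that carry data become single-key objects. A quick illustration with a hypothetical enum (not the writer's actual error type):

```rust
#[derive(serde::Serialize)]
enum SoftError {
    PrincipalMappingNotReferenced,        // unit variant -> "PrincipalMappingNotReferenced"
    StopProcessFailed { reason: String }, // struct variant -> {"StopProcessFailed":{"reason":"..."}}
}

fn main() {
    let unit = SoftError::PrincipalMappingNotReferenced;
    let data = SoftError::StopProcessFailed { reason: "EPERM".into() };
    assert_eq!(
        serde_json::to_string(&unit).unwrap(),
        r#""PrincipalMappingNotReferenced""#
    );
    assert_eq!(
        serde_json::to_string(&data).unwrap(),
        r#"{"StopProcessFailed":{"reason":"EPERM"}}"#
    );
}
```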
third_party/rust/minidump-writer/tests/linux_minidump_writer_soft_error.rs (new vendored file, 92 lines)
@@ -0,0 +1,92 @@
#![cfg(any(target_os = "linux", target_os = "android"))]

use {
    common::*,
    minidump::Minidump,
    minidump_writer::{minidump_writer::MinidumpWriter, FailSpotName},
    serde_json::json,
};

mod common;

#[test]
fn soft_error_stream() {
    let mut child = start_child_and_wait_for_threads(1);
    let pid = child.id() as i32;

    let mut tmpfile = tempfile::Builder::new()
        .prefix("soft_error_stream")
        .tempfile()
        .unwrap();

    let mut fail_client = FailSpotName::testing_client();
    fail_client.set_enabled(FailSpotName::StopProcess, true);

    // Write a minidump
    MinidumpWriter::new(pid, pid)
        .dump(&mut tmpfile)
        .expect("cound not write minidump");
    child.kill().expect("Failed to kill process");
    child.wait().expect("Failed to wait on killed process");

    // Ensure the minidump has a MozSoftErrors present
    let dump = Minidump::read_path(tmpfile.path()).expect("failed to read minidump");
    read_minidump_soft_errors_or_panic(&dump);
}

#[test]
fn soft_error_stream_content() {
    let expected_errors = vec![
        json!({"InitErrors": [
            {"StopProcessFailed": {"Stop": "EPERM"}},
            {"FillMissingAuxvInfoErrors": ["InvalidFormat"]},
            {"EnumerateThreadsErrors": [
                {"ReadThreadNameFailed": "\
                    Custom {\n    \
                        kind: Other,\n    \
                        error: \"testing requested failure reading thread name\",\n\
                    }"
                }
            ]}
        ]}),
        json!({"SuspendThreadsErrors": [{"PtraceAttachError": [1234, "EPERM"]}]}),
        json!({"WriteSystemInfoErrors": [
            {"WriteCpuInformationFailed": {"IOError": "\
                Custom {\n    \
                    kind: Other,\n    \
                    error: \"test requested cpuinfo file failure\",\n\
                }"
            }}
        ]}),
    ];

    let mut child = start_child_and_wait_for_threads(1);
    let pid = child.id() as i32;

    let mut tmpfile = tempfile::Builder::new()
        .prefix("soft_error_stream_content")
        .tempfile()
        .unwrap();

    let mut fail_client = FailSpotName::testing_client();
    for name in [
        FailSpotName::StopProcess,
        FailSpotName::FillMissingAuxvInfo,
        FailSpotName::ThreadName,
        FailSpotName::SuspendThreads,
        FailSpotName::CpuInfoFileOpen,
    ] {
        fail_client.set_enabled(name, true);
    }

    // Write a minidump
    MinidumpWriter::new(pid, pid)
        .dump(&mut tmpfile)
        .expect("cound not write minidump");
    child.kill().expect("Failed to kill process");
    child.wait().expect("Failed to wait on killed process");

    // Ensure the MozSoftErrors stream contains the expected errors
    let dump = Minidump::read_path(tmpfile.path()).expect("failed to read minidump");
    assert_soft_errors_in_minidump(&dump, &expected_errors);
}
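These tests drive the new failspot machinery: the testing client switches named spots on, and code gated behind `failspot!` takes the error branch whenever its spot is enabled. Below is a minimal stand-in for that idea built on a plain atomic flag; it is not the failspot crate's API, just an illustration of what enabling a spot does to the gated path.

```rust
use std::sync::atomic::{AtomicBool, Ordering};

// One flag per "spot"; the real crate manages a set of named spots behind a client handle.
static THREAD_NAME_FAIL: AtomicBool = AtomicBool::new(false);

fn read_thread_name(tid: i32) -> std::io::Result<String> {
    if THREAD_NAME_FAIL.load(Ordering::Relaxed) {
        // Injected failure, mirroring the failspot!(if ThreadName { ... }) branch above.
        return Err(std::io::Error::other(
            "testing requested failure reading thread name",
        ));
    }
    std::fs::read_to_string(format!("/proc/self/task/{tid}/comm"))
}

fn main() {
    THREAD_NAME_FAIL.store(true, Ordering::Relaxed); // roughly what set_enabled(..., true) achieves
    assert!(read_thread_name(1).is_err());
}
```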
@@ -140,87 +140,87 @@ fn dump_external_process() {
    }
}

/// Validates we can actually walk the stack for each thread in the minidump,
/// this is using minidump-processor, which (currently) depends on breakpad
/// symbols, however https://github.com/mozilla/dump_syms is not available as
/// a library https://github.com/mozilla/dump_syms/issues/253, so we just require
/// that it already be installed, hence the ignore
#[test]
fn stackwalks() {
    if std::env::var("CI").is_ok() {
        println!("test disabled, consistently times out because of potato runners");
        return;
    }
// /// Validates we can actually walk the stack for each thread in the minidump,
// /// this is using minidump-processor, which (currently) depends on breakpad
// /// symbols, however https://github.com/mozilla/dump_syms is not available as
// /// a library https://github.com/mozilla/dump_syms/issues/253, so we just require
// /// that it already be installed, hence the ignore
// #[test]
// fn stackwalks() {
//     if std::env::var("CI").is_ok() {
//         println!("test disabled, consistently times out because of potato runners");
//         return;
//     }

    println!("generating minidump...");
    let md = capture_minidump("stackwalks", mach2::exception_types::EXC_BREAKPOINT);
//     println!("generating minidump...");
//     let md = capture_minidump("stackwalks", mach2::exception_types::EXC_BREAKPOINT);

    // Generate the breakpad symbols
    println!("generating symbols...");
    dump_syms::dumper::single_file(
        &dump_syms::dumper::Config {
            output: dump_syms::dumper::Output::Store(".test-symbols".into()),
            symbol_server: None,
            debug_id: None,
            code_id: None,
            arch: if cfg!(target_arch = "aarch64") {
                "arm64"
            } else if cfg!(target_arch = "x86_64") {
                "x86_64"
            } else {
                panic!("invalid MacOS target architecture")
            },
            num_jobs: 2, // default this
            check_cfi: false,
            emit_inlines: false,
            mapping_var: None,
            mapping_src: None,
            mapping_dest: None,
            mapping_file: None,
        },
        "target/debug/test",
    )
    .expect("failed to dump symbols");
//     // Generate the breakpad symbols
//     println!("generating symbols...");
//     dump_syms::dumper::single_file(
//         &dump_syms::dumper::Config {
//             output: dump_syms::dumper::Output::Store(".test-symbols".into()),
//             symbol_server: None,
//             debug_id: None,
//             code_id: None,
//             arch: if cfg!(target_arch = "aarch64") {
//                 "arm64"
//             } else if cfg!(target_arch = "x86_64") {
//                 "x86_64"
//             } else {
//                 panic!("invalid MacOS target architecture")
//             },
//             num_jobs: 2, // default this
//             check_cfi: false,
//             emit_inlines: false,
//             mapping_var: None,
//             mapping_src: None,
//             mapping_dest: None,
//             mapping_file: None,
//         },
//         "target/debug/test",
//     )
//     .expect("failed to dump symbols");

    let provider = minidump_unwind::Symbolizer::new(minidump_unwind::simple_symbol_supplier(vec![
        ".test-symbols".into(),
    ]));
//     let provider = minidump_unwind::Symbolizer::new(minidump_unwind::simple_symbol_supplier(vec![
//         ".test-symbols".into(),
//     ]));

    let state = futures::executor::block_on(async {
        minidump_processor::process_minidump(&md.minidump, &provider).await
    })
    .unwrap();
//     let state = futures::executor::block_on(async {
//         minidump_processor::process_minidump(&md.minidump, &provider).await
//     })
//     .unwrap();

    //state.print(&mut std::io::stdout()).map_err(|_| ()).unwrap();
    // //state.print(&mut std::io::stdout()).map_err(|_| ()).unwrap();

    // We expect at least 2 threads, one of which is the fake crashing thread
    let fake_crash_thread = state
        .threads
        .iter()
        .find(|cs| cs.thread_id == md.thread)
        .expect("failed to find crash thread");
    // // We expect at least 2 threads, one of which is the fake crashing thread
    // let fake_crash_thread = state
    //     .threads
    //     .iter()
    //     .find(|cs| cs.thread_id == md.thread)
    //     .expect("failed to find crash thread");

    assert_eq!(
        fake_crash_thread.thread_name.as_deref(),
        Some("test-thread")
    );
    // assert_eq!(
    //     fake_crash_thread.thread_name.as_deref(),
    //     Some("test-thread")
    // );

    assert!(
        fake_crash_thread.frames.iter().any(|sf| {
            sf.function_name
                .as_ref()
                .map_or(false, |fname| fname.ends_with("wait_until_killed"))
        }),
        "unable to locate expected function"
    );
    // assert!(
    //     fake_crash_thread.frames.iter().any(|sf| {
    //         sf.function_name
    //             .as_ref()
    //             .map_or(false, |fname| fname.ends_with("wait_until_killed"))
    //     }),
    //     "unable to locate expected function"
    // );

    let mod_list: MinidumpModuleList = md
        .minidump
        .get_stream()
        .expect("Couldn't find MinidumpModuleList");
    // let mod_list: MinidumpModuleList = md
    //     .minidump
    //     .get_stream()
    //     .expect("Couldn't find MinidumpModuleList");

    // Ensure we found dyld
    assert!(mod_list
        .iter()
        .any(|module| &module.name == "/usr/lib/dyld"));
}
    // // Ensure we found dyld
    // assert!(mod_list
    //     .iter()
    //     .any(|module| &module.name == "/usr/lib/dyld"));
    // }
@@ -1,13 +1,20 @@
//! All of these tests are specific to ptrace
#![cfg(any(target_os = "linux", target_os = "android"))]

use minidump_writer::ptrace_dumper::PtraceDumper;
use nix::sys::mman::{mmap, MapFlags, ProtFlags};
use nix::sys::signal::Signal;
use std::convert::TryInto;
use std::io::{BufRead, BufReader};
use std::mem::size_of;
use std::os::unix::process::ExitStatusExt;
use {
    error_graph::ErrorList,
    minidump_writer::ptrace_dumper::PtraceDumper,
    nix::{
        sys::mman::{mmap, MapFlags, ProtFlags},
        sys::signal::Signal,
    },
    std::{
        convert::TryInto,
        io::{BufRead, BufReader},
        mem::size_of,
        os::unix::process::ExitStatusExt,
    },
};

mod common;
use common::*;
@@ -23,6 +30,13 @@ macro_rules! disabled_on_ci_and_android {
    };
}

macro_rules! assert_no_soft_errors(($n: ident, $e: expr) => {{
    let mut $n = ErrorList::default();
    let __result = $e;
    assert!($n.is_empty(), "{:?}", $n);
    __result
}});

#[test]
fn test_setup() {
    spawn_child("setup", &[]);
@@ -91,14 +105,21 @@ fn test_thread_list_from_parent() {
    let num_of_threads = 5;
    let mut child = start_child_and_wait_for_threads(num_of_threads);
    let pid = child.id() as i32;
    let mut dumper = PtraceDumper::new(

    let mut dumper = assert_no_soft_errors!(
        soft_errors,
        PtraceDumper::new_report_soft_errors(
            pid,
            minidump_writer::minidump_writer::STOP_TIMEOUT,
            Default::default(),
            &mut soft_errors,
        )
    )
    .expect("Couldn't init dumper");

    assert_eq!(dumper.threads.len(), num_of_threads);
    dumper.suspend_threads().expect("Could not suspend threads");

    assert_no_soft_errors!(soft_errors, dumper.suspend_threads(&mut soft_errors));

    // let mut matching_threads = 0;
    for (idx, curr_thread) in dumper.threads.iter().enumerate() {
@@ -146,7 +167,7 @@ fn test_thread_list_from_parent() {
            0
        }; */
    }
    dumper.resume_threads().expect("Failed to resume threads");
    assert_no_soft_errors!(soft_errors, dumper.resume_threads(&mut soft_errors));
    child.kill().expect("Failed to kill process");

    // Reap child
@@ -272,14 +293,20 @@ fn test_sanitize_stack_copy() {
    let heap_addr = usize::from_str_radix(output.next().unwrap().trim_start_matches("0x"), 16)
        .expect("unable to parse mmap_addr");

    let mut dumper = PtraceDumper::new(
    let mut dumper = assert_no_soft_errors!(
        soft_errors,
        PtraceDumper::new_report_soft_errors(
            pid,
            minidump_writer::minidump_writer::STOP_TIMEOUT,
            Default::default(),
            &mut soft_errors,
        )
    )
    .expect("Couldn't init dumper");
    assert_eq!(dumper.threads.len(), num_of_threads);
    dumper.suspend_threads().expect("Could not suspend threads");

    assert_no_soft_errors!(soft_errors, dumper.suspend_threads(&mut soft_errors));

    let thread_info = dumper
        .get_thread_info_by_index(0)
        .expect("Couldn't find thread_info");
@@ -374,7 +401,8 @@ fn test_sanitize_stack_copy() {

    assert_eq!(simulated_stack[0..size_of::<usize>()], defaced);

    dumper.resume_threads().expect("Failed to resume threads");
    assert_no_soft_errors!(soft_errors, dumper.resume_threads(&mut soft_errors));

    child.kill().expect("Failed to kill process");

    // Reap child
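The `assert_no_soft_errors!` macro introduced above just runs an expression with a fresh error list in scope and fails the test if anything was recorded. The same shape as a plain function over a closure (a sketch using `Vec<String>` instead of `error_graph::ErrorList`):

```rust
/// Run `f` with a fresh soft-error list and fail the test if anything was recorded.
fn assert_no_soft_errors<T>(f: impl FnOnce(&mut Vec<String>) -> T) -> T {
    let mut soft_errors = Vec::new();
    let result = f(&mut soft_errors);
    assert!(soft_errors.is_empty(), "{soft_errors:?}");
    result
}

fn main() {
    let value = assert_no_soft_errors(|soft_errors| {
        // A well-behaved operation records nothing.
        let _ = soft_errors;
        42
    });
    assert_eq!(value, 42);
}
```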
@@ -1 +1 @@
{"files":{"Cargo.toml":"ffe32239c2958904c0be971a991667397078fe6db3439350a8af4004af7f5473","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"f0fe4547fd7044ef4d06d902491be33a0fc13ba003c4e5af1965d6126907ced8","src/context.rs":"b271075155d33cce5c1f79ce4b1fd2f3335293d9a4344c3681e5825f3caeceb8","src/iostuff.rs":"eedeb0a9cf9f7d2af1558c916c73ec7287de5f4e45b4ff6b9a8013645f09b9e1","src/lib.rs":"a63adbdfbad839969998249ed6032d226557915761b12f9ee029c3766ec9507f","src/minidump.rs":"7a07e390b4790478f1f81208a81a59b9e8dc61fde1588c1c223f21ee21246859","src/strings.rs":"b55266f137550602733319fe6c3a83a6b84931c47ccdcfe13de72c40d3cea1df","src/system_info.rs":"96cbbd3239c388474e60690ef9250040c91331811f8ff06a10ed82b9f55e7455","tests/test_minidump.rs":"d9fb6b8ec7749d3bc6bf6de6b8a9a73d5934c81881a9d98087fcc490e8bb447d"},"package":"cee91aa51259518a08a12c18b5754e45135f89f1d9d7d6aae76ce93b92686698"}
{"files":{"Cargo.toml":"d99d5b3c0c05fb956c6526ce35a3f27cb9e27b0cb78efea22ebd9afa0bf44e87","LICENSE":"06de63df29199a394442b57a28e886059ddc940973e10646877a0793fd53e2c9","README.md":"f0fe4547fd7044ef4d06d902491be33a0fc13ba003c4e5af1965d6126907ced8","src/context.rs":"2d999a7fd135738ca85236ecdc90a124272c85afdb0b8747e5da0454da137072","src/iostuff.rs":"eedeb0a9cf9f7d2af1558c916c73ec7287de5f4e45b4ff6b9a8013645f09b9e1","src/lib.rs":"a63adbdfbad839969998249ed6032d226557915761b12f9ee029c3766ec9507f","src/minidump.rs":"16abaa34f8a53849275499a7a5560ec597d8e1656df5b5d1e68169cd7f7ed485","src/strings.rs":"b55266f137550602733319fe6c3a83a6b84931c47ccdcfe13de72c40d3cea1df","src/system_info.rs":"96cbbd3239c388474e60690ef9250040c91331811f8ff06a10ed82b9f55e7455","tests/test_minidump.rs":"d9fb6b8ec7749d3bc6bf6de6b8a9a73d5934c81881a9d98087fcc490e8bb447d"},"package":"e03e301d414a75655d4ce80e6e3690fbfe70814b67c496c64c826ba558d18ec9"}
third_party/rust/minidump/Cargo.toml (vendored, 7 lines changed)
@@ -12,9 +12,10 @@
[package]
edition = "2018"
name = "minidump"
version = "0.22.1"
version = "0.24.0"
authors = ["Ted Mielczarek <ted@mielczarek.org>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
@@ -55,13 +56,13 @@ version = "0.8"
version = "0.9"

[dependencies.minidump-common]
version = "0.22.1"
version = "0.24.0"

[dependencies.num-traits]
version = "0.2"

[dependencies.procfs-core]
version = "0.16"
version = "0.17"
default-features = false

[dependencies.range-map]
third_party/rust/minidump/src/context.rs (vendored, 2 lines changed)
@@ -140,7 +140,7 @@ pub struct CpuRegisters<'a, T: ?Sized> {
    context: &'a T,
}

impl<'a, T> Iterator for CpuRegisters<'a, T>
impl<T> Iterator for CpuRegisters<'_, T>
where
    T: CpuContext,
{
third_party/rust/minidump/src/minidump.rs (vendored, 32 lines changed)
@@ -601,7 +601,7 @@ pub enum UnifiedMemoryList<'a> {
    Memory(MinidumpMemoryList<'a>),
    Memory64(MinidumpMemory64List<'a>),
}
impl<'a> Default for UnifiedMemoryList<'a> {
impl Default for UnifiedMemoryList<'_> {
    fn default() -> Self {
        Self::Memory(Default::default())
    }
@@ -1998,7 +1998,7 @@ impl<'a> MinidumpMemory<'a> {
    }
}

impl<'a> MinidumpMemory64<'a> {
impl MinidumpMemory64<'_> {
    /// Write a human-readable description of this `MinidumpMemory64` to `f`.
    ///
    /// This is very verbose, it is the format used by `minidump_dump`.
@@ -2184,7 +2184,7 @@ impl<'mdmp, Descriptor> MinidumpMemoryListBase<'mdmp, Descriptor> {
    }
}

impl<'mdmp> MinidumpMemoryList<'mdmp> {
impl MinidumpMemoryList<'_> {
    /// Write a human-readable description of this `MinidumpMemoryList` to `f`.
    ///
    /// This is very verbose, it is the format used by `minidump_dump`.
@@ -2205,7 +2205,7 @@ impl<'mdmp> MinidumpMemoryList<'mdmp> {
    }
}

impl<'mdmp> MinidumpMemory64List<'mdmp> {
impl MinidumpMemory64List<'_> {
    /// Write a human-readable description of this `MinidumpMemory64List` to `f`.
    ///
    /// This is very verbose, it is the format used by `minidump_dump`.
@@ -2281,7 +2281,7 @@ impl<'mdmp> UnifiedMemoryList<'mdmp> {
    }
}

impl<'a, Descriptor> Default for MinidumpMemoryListBase<'a, Descriptor> {
impl<Descriptor> Default for MinidumpMemoryListBase<'_, Descriptor> {
    fn default() -> Self {
        Self::new()
    }
@@ -2406,7 +2406,7 @@ impl<'a> MinidumpStream<'a> for MinidumpMemoryInfoList<'a> {
    }
}

impl<'a> Default for MinidumpMemoryInfoList<'a> {
impl Default for MinidumpMemoryInfoList<'_> {
    fn default() -> Self {
        Self::new()
    }
@@ -2476,7 +2476,7 @@ impl<'mdmp> MinidumpMemoryInfoList<'mdmp> {
    }
}

impl<'a> MinidumpMemoryInfo<'a> {
impl MinidumpMemoryInfo<'_> {
    /// Write a human-readable description.
    pub fn print<T: Write>(&self, f: &mut T) -> io::Result<()> {
        write!(
@@ -2567,7 +2567,7 @@ impl<'a> MinidumpStream<'a> for MinidumpLinuxMaps<'a> {
    }
}

impl<'a> Default for MinidumpLinuxMaps<'a> {
impl Default for MinidumpLinuxMaps<'_> {
    fn default() -> Self {
        Self::new()
    }
@@ -2637,7 +2637,7 @@ impl<'mdmp> MinidumpLinuxMaps<'mdmp> {
    }
}

impl<'a> MinidumpLinuxMapInfo<'a> {
impl MinidumpLinuxMapInfo<'_> {
    /// Write a human-readable description of this.
    pub fn print<T: Write>(&self, f: &mut T) -> io::Result<()> {
        write!(
@@ -2692,7 +2692,7 @@ impl<'a> MinidumpLinuxMapInfo<'a> {
    }
}

impl<'a> Default for UnifiedMemoryInfoList<'a> {
impl Default for UnifiedMemoryInfoList<'_> {
    fn default() -> Self {
        Self::Info(MinidumpMemoryInfoList::default())
    }
@@ -2811,7 +2811,7 @@ macro_rules! unified_memory_forward {
    };
}

impl<'a> UnifiedMemoryInfo<'a> {
impl UnifiedMemoryInfo<'_> {
    unified_memory_forward! {
        /// Write a human-readable description.
        pub fn print<T: Write>(&self, f: &mut T) -> io::Result<()>;
@@ -3148,7 +3148,7 @@ impl<'a> MinidumpStream<'a> for MinidumpThreadInfoList {
    }
}

impl<'a> MinidumpStream<'a> for MinidumpSystemInfo {
impl MinidumpStream<'_> for MinidumpSystemInfo {
    const STREAM_TYPE: u32 = MINIDUMP_STREAM_TYPE::SystemInfoStream as u32;

    fn read(
@@ -3537,7 +3537,7 @@ impl RawMiscInfo {
    );
}

impl<'a> MinidumpStream<'a> for MinidumpMiscInfo {
impl MinidumpStream<'_> for MinidumpMiscInfo {
    const STREAM_TYPE: u32 = MINIDUMP_STREAM_TYPE::MiscInfoStream as u32;

    fn read(
@@ -3667,7 +3667,7 @@ impl RawMacCrashInfo {
    );
}

impl<'a> MinidumpStream<'a> for MinidumpMacCrashInfo {
impl MinidumpStream<'_> for MinidumpMacCrashInfo {
    const STREAM_TYPE: u32 = MINIDUMP_STREAM_TYPE::MozMacosCrashInfoStream as u32;

    fn read(
@@ -3826,7 +3826,7 @@ impl MinidumpMacCrashInfo {
    }
}

impl<'a> MinidumpStream<'a> for MinidumpMacBootargs {
impl MinidumpStream<'_> for MinidumpMacBootargs {
    const STREAM_TYPE: u32 = MINIDUMP_STREAM_TYPE::MozMacosBootargsStream as u32;

    fn read(
@@ -4145,7 +4145,7 @@ impl MinidumpMiscInfo {
    }
}

impl<'a> MinidumpStream<'a> for MinidumpBreakpadInfo {
impl MinidumpStream<'_> for MinidumpBreakpadInfo {
    const STREAM_TYPE: u32 = MINIDUMP_STREAM_TYPE::BreakpadInfoStream as u32;

    fn read(
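The minidump 0.24 changes above are mechanical lifetime cleanups: when an impl never needs to name its lifetime parameter, `impl<'a> Trait<'a> for Type<'a>` can be written with the anonymous lifetime instead, which is the form clippy's `needless_lifetimes` lint nudges toward. Both spellings compile to the same thing:

```rust
struct Wrapper<'a>(&'a str);

trait Describe {
    fn describe(&self) -> String;
}

// Old spelling: the lifetime is declared even though the body never refers to it.
// impl<'a> Describe for Wrapper<'a> { ... }

// New spelling used by the vendored update: elide it with '_.
impl Describe for Wrapper<'_> {
    fn describe(&self) -> String {
        format!("wraps {:?}", self.0)
    }
}

fn main() {
    let w = Wrapper("dyld");
    assert_eq!(w.describe(), "wraps \"dyld\"");
}
```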
@@ -1 +1 @@
{"files":{"Cargo.toml":"bf2944cc99c95272ba1114b998c83af42d5e64e64a6fb48b2f2a921c002d9077","README.md":"ac6e93e07291e148ba9e1913ceabee01d8882e0b7e263ee9287b20a2e500f599","src/cgroups.rs":"fda4941006913801ba0653d3b04924fd6e7cbebf5759814eef4e1176a94b4e91","src/cpuinfo.rs":"2ae02c7183d3ac50625a1958c447fa6d7da351c94e43fec388517a8d01718059","src/diskstats.rs":"e554dbaa8772a6e7cb857a052bd69191058a19b5c0aee4340f260ad8d53a6fba","src/iomem.rs":"2abfd4428ad6f4354ebcca210a8b765dc8b07d5e30f060dcf77cf8296f1e4d38","src/keyring.rs":"a7c156895c70454e453b7701b7036820e673aefe08d5a63ee517b65a4e66bf67","src/lib.rs":"4ae954f01c0886afe0cb196f5cfa38341b8557fc91aaaff95ee5b2f3765ad0ae","src/locks.rs":"5c26a0b39a4dae5951ef79b013856db3c69d8b279981065c71100a9e216a566a","src/meminfo.rs":"5bf226a7773c94db47bd53cbf642b15e72d67292429d20d7fbcd14c891c32622","src/mounts.rs":"181ae262cdd3cf08203fe73f2cd397da8b3db009c0a0c8acb6b22da13149fa67","src/net.rs":"a6a609d2bd2571ccc6fd2a5a9be9354bee5a9b874a913ab02bbdf46fa0968e3f","src/partitions.rs":"f6ea6482b80bfa59a6d90ddf05ea62707467c5cc7b791a490e0858a846dfcd2e","src/pressure.rs":"33a22c29d7f5c01603ad515e102d594235d01d176213668da547ee5634558992","src/process/clear_refs.rs":"59c885bfb839b72e6c2234c148a977146da02af8aadc6d7d24b5936aa42e4ebe","src/process/limit.rs":"3f449b0b266418099f3003c65b04c468a4c92845350aa35dd28cc13d7b7f656e","src/process/mod.rs":"2bd84aadb16a04b7c3ff48b9395ba36b85de979543a90d136c8d63aef0bd004d","src/process/mount.rs":"4ed4c468bd0f37c012b17154d69c214a46e52f059dd9121e07d2d29561fe52ee","src/process/namespaces.rs":"36b65fce5e1554dcce9019acb4cdc0dcc47420ef4336a7d9d97d8fc455ab3297","src/process/pagemap.rs":"0f1a7908dba734a26fbe044510f61c0df7ad00dbfcba6a2279064d2ca7aa8278","src/process/schedstat.rs":"cebbd598e03a765bcf59a22989e846fa78d660c4cb87623d07e594849cf0cf66","src/process/smaps_rollup.rs":"8d7d2c69fc6d54856d240a14ed6dfa4dcc92b8b29c3139fe651d18f2011a7575","src/process/stat.rs":"17849e16b79e34ec8c4c230f196d0e5f1eb756ddb013b3c97e3283b83ff5ef4f","src/process/status.rs":"81de4bd29df76aef0792e23f4f802fff1134ada059f2f4293d9a771a23aa5f02","src/sys/kernel/mod.rs":"54437f0a32eba1e6eaffa11ad041a2e914a16726cb2904af61131dbaa4bae31c","src/sys/mod.rs":"a7b744630e859005307046558f5e827e07b73cb0b5f1dd89acb02f21059e775c","src/sysvipc_shm.rs":"794b2ed9d2b20d25c6da220d6c7ac0b1b54219c9ebb0a1bf6ff9a11dd091f2a7","src/uptime.rs":"ffea251a2ba4da2a311b6a51606102771e55b2db1e33ced5b3569dde319b264b"},"package":"2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29"}
{"files":{"COPYRIGHT.txt":"9f595add2d755fc897ea3045cccbf7146acf51f8b5cf5d2669aee18634eef4bf","Cargo.toml":"507766f8c741f4cff4f69b7750af969560df8cd082f37b4d468fb1b273837932","LICENSE-APACHE":"c6596eb7be8581c18be736c846fb9173b69eccf6ef94c5135893ec56bd92ba08","LICENSE-MIT":"c5bbf39118b0639bf8bd391ae0d7d81f25c1cb4066e0fdae6a405b20fb7ca170","README.md":"ac6e93e07291e148ba9e1913ceabee01d8882e0b7e263ee9287b20a2e500f599","src/cgroups.rs":"b96ce48ec7ba93045e1baa23fce4c6a8d209570892314cb8751e57e3386b0fa8","src/cpuinfo.rs":"5717b21bac1b8475550d9090f3ba77bf9f88ba84f37918038f9b9e8389122b9c","src/crypto.rs":"348dfd27896f299d0c421bf9b6bbc563e0bca915b5ea477acb50b10b9bfecf29","src/devices.rs":"332f5bfd3b2c5ff4eac19475bb025b7822ee8bd3c0332c04e2401b54f751cc01","src/diskstats.rs":"e554dbaa8772a6e7cb857a052bd69191058a19b5c0aee4340f260ad8d53a6fba","src/iomem.rs":"2abfd4428ad6f4354ebcca210a8b765dc8b07d5e30f060dcf77cf8296f1e4d38","src/keyring.rs":"a7c156895c70454e453b7701b7036820e673aefe08d5a63ee517b65a4e66bf67","src/kpageflags.rs":"628b445b72f7112e4dd78c3499228bd20ade1a4674d4485bfdf46cf929e10ad6","src/lib.rs":"582cd6ad12f4e3c5fc230f490a3b2d9ccc51229349866cabc051844de317746f","src/locks.rs":"5c26a0b39a4dae5951ef79b013856db3c69d8b279981065c71100a9e216a566a","src/meminfo.rs":"588691e31633a37bc8a265c151d35ef93a5b6dd15603e63579277ab66d91699f","src/mounts.rs":"181ae262cdd3cf08203fe73f2cd397da8b3db009c0a0c8acb6b22da13149fa67","src/net.rs":"938b1857e8170c21969026eb767dcdb6e500b22113400bd1add441aeeb2b2939","src/partitions.rs":"dbe7e9b774980a35450fbd164a3326fcf8774bdffab27b492f3835991570e65b","src/pressure.rs":"33a22c29d7f5c01603ad515e102d594235d01d176213668da547ee5634558992","src/process/clear_refs.rs":"59c885bfb839b72e6c2234c148a977146da02af8aadc6d7d24b5936aa42e4ebe","src/process/limit.rs":"3f449b0b266418099f3003c65b04c468a4c92845350aa35dd28cc13d7b7f656e","src/process/mod.rs":"2bd84aadb16a04b7c3ff48b9395ba36b85de979543a90d136c8d63aef0bd004d","src/process/mount.rs":"0387c79c35ccf7221badf5d6d92a35e8fd4e98bc3c97bbe9b34a7e4af64c0564","src/process/namespaces.rs":"36b65fce5e1554dcce9019acb4cdc0dcc47420ef4336a7d9d97d8fc455ab3297","src/process/pagemap.rs":"94e98463917e23144b9a109b46731443914fc6fc8c12c2730d3a6942f220c789","src/process/schedstat.rs":"cebbd598e03a765bcf59a22989e846fa78d660c4cb87623d07e594849cf0cf66","src/process/smaps_rollup.rs":"8d7d2c69fc6d54856d240a14ed6dfa4dcc92b8b29c3139fe651d18f2011a7575","src/process/stat.rs":"17849e16b79e34ec8c4c230f196d0e5f1eb756ddb013b3c97e3283b83ff5ef4f","src/process/status.rs":"81de4bd29df76aef0792e23f4f802fff1134ada059f2f4293d9a771a23aa5f02","src/sys/kernel/mod.rs":"54437f0a32eba1e6eaffa11ad041a2e914a16726cb2904af61131dbaa4bae31c","src/sys/mod.rs":"a7b744630e859005307046558f5e827e07b73cb0b5f1dd89acb02f21059e775c","src/sysvipc_shm.rs":"794b2ed9d2b20d25c6da220d6c7ac0b1b54219c9ebb0a1bf6ff9a11dd091f2a7","src/uptime.rs":"ffea251a2ba4da2a311b6a51606102771e55b2db1e33ced5b3569dde319b264b"},"package":"239df02d8349b06fc07398a3a1697b06418223b1c7725085e801e7c0fc6a12ec"}
483
third_party/rust/procfs-core/COPYRIGHT.txt
vendored
Normal file
483
third_party/rust/procfs-core/COPYRIGHT.txt
vendored
Normal file
@@ -0,0 +1,483 @@
|
||||
The source code for the procfs library is copyright by Andrew Chin, 2019, and other contributors.
|
||||
|
||||
It is licensed under either of
|
||||
|
||||
* Apache License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0
|
||||
* MIT license, http://opensource.org/licenses/MIT
|
||||
|
||||
at your option.
|
||||
|
||||
The documentation of this library is derived from documentation written by others:
|
||||
|
||||
* The proc(5) man page:
|
||||
|
||||
Copyright (C) 1994, 1995 by Daniel Quinlan (quinlan@yggdrasil.com)
|
||||
and Copyright (C) 2002-2008,2017 Michael Kerrisk <mtk.manpages@gmail.com>
|
||||
with networking additions from Alan Cox (A.Cox@swansea.ac.uk)
|
||||
and scsi additions from Michael Neuffer (neuffer@mail.uni-mainz.de)
|
||||
and sysctl additions from Andries Brouwer (aeb@cwi.nl)
|
||||
and System V IPC (as well as various other) additions from
|
||||
Michael Kerrisk <mtk.manpages@gmail.com>
|
||||
|
||||
Under the GPL Free Documentation License (reproduced below).
|
||||
|
||||
* Other manual pages:
|
||||
|
||||
Copyright (c) 2006, 2008 by Michael Kerrisk <mtk.manpages@gmail.com>
|
||||
|
||||
Under the following license:
|
||||
|
||||
Permission is granted to make and distribute verbatim copies of this
|
||||
manual provided the copyright notice and this permission notice are
|
||||
preserved on all copies.
|
||||
|
||||
Permission is granted to copy and distribute modified versions of this
|
||||
manual under the conditions for verbatim copying, provided that the
|
||||
entire resulting derived work is distributed under the terms of a
|
||||
permission notice identical to this one.
|
||||
|
||||
Since the Linux kernel and libraries are constantly changing, this
|
||||
manual page may be incorrect or out-of-date. The author(s) assume no
|
||||
responsibility for errors or omissions, or for damages resulting from
|
||||
the use of the information contained herein. The author(s) may not
|
||||
have taken the same level of care in the production of this manual,
|
||||
which is licensed free of charge, as they might when working
|
||||
professionally.
|
||||
|
||||
Formatted or processed versions of this manual, if unaccompanied by
|
||||
the source, must acknowledge the copyright and authors of this work.
|
||||
|
||||
* The Linux Documentation Project:
|
||||
|
||||
Copyright 2003 Binh Nguyen
|
||||
|
||||
Under the GPL Free Documenation License. See: http://tldp.org/LDP/Linux-Filesystem-Hierarchy/html/ln14.html
|
||||
|
||||
|
||||
==================================
|
||||
Below is a copy of the GPL license:
|
||||
|
||||
This is free documentation; you can redistribute it and/or
|
||||
modify it under the terms of the GNU General Public License as
|
||||
published by the Free Software Foundation; either version 2 of
|
||||
the License, or (at your option) any later version.
|
||||
|
||||
The GNU General Public License's references to "object code"
|
||||
and "executables" are to be interpreted as the output of any
|
||||
document formatting or typesetting system, including
|
||||
intermediate and printed output.
|
||||
|
||||
This manual is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public
|
||||
License along with this manual; if not, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
==================================
|
||||
A full copy of the GNU Free Documentation License, version 1.2, can be found here:
|
||||
https://www.gnu.org/licenses/old-licenses/fdl-1.2.txt
|
||||
|
||||
Below is a copy of this license:
|
||||
|
||||
|
||||
GNU Free Documentation License
|
||||
Version 1.2, November 2002
|
||||
|
||||
|
||||
Copyright (C) 2000,2001,2002 Free Software Foundation, Inc.
|
||||
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
|
||||
0. PREAMBLE
|
||||
|
||||
The purpose of this License is to make a manual, textbook, or other
|
||||
functional and useful document "free" in the sense of freedom: to
|
||||
assure everyone the effective freedom to copy and redistribute it,
|
||||
with or without modifying it, either commercially or noncommercially.
|
||||
Secondarily, this License preserves for the author and publisher a way
|
||||
to get credit for their work, while not being considered responsible
|
||||
for modifications made by others.
|
||||
|
||||
This License is a kind of "copyleft", which means that derivative
|
||||
works of the document must themselves be free in the same sense. It
|
||||
complements the GNU General Public License, which is a copyleft
|
||||
license designed for free software.
|
||||
|
||||
We have designed this License in order to use it for manuals for free
|
||||
software, because free software needs free documentation: a free
|
||||
program should come with manuals providing the same freedoms that the
|
||||
software does. But this License is not limited to software manuals;
|
||||
it can be used for any textual work, regardless of subject matter or
|
||||
whether it is published as a printed book. We recommend this License
|
||||
principally for works whose purpose is instruction or reference.
|
||||
|
||||
|
||||
1. APPLICABILITY AND DEFINITIONS
|
||||
|
||||
This License applies to any manual or other work, in any medium, that
|
||||
contains a notice placed by the copyright holder saying it can be
|
||||
distributed under the terms of this License. Such a notice grants a
|
||||
world-wide, royalty-free license, unlimited in duration, to use that
|
||||
work under the conditions stated herein. The "Document", below,
|
||||
refers to any such manual or work. Any member of the public is a
|
||||
licensee, and is addressed as "you". You accept the license if you
|
||||
copy, modify or distribute the work in a way requiring permission
|
||||
under copyright law.
|
||||
|
||||
A "Modified Version" of the Document means any work containing the
|
||||
Document or a portion of it, either copied verbatim, or with
|
||||
modifications and/or translated into another language.
|
||||
|
||||
A "Secondary Section" is a named appendix or a front-matter section of
|
||||
the Document that deals exclusively with the relationship of the
|
||||
publishers or authors of the Document to the Document's overall subject
|
||||
(or to related matters) and contains nothing that could fall directly
|
||||
within that overall subject. (Thus, if the Document is in part a
|
||||
textbook of mathematics, a Secondary Section may not explain any
|
||||
mathematics.) The relationship could be a matter of historical
|
||||
connection with the subject or with related matters, or of legal,
|
||||
commercial, philosophical, ethical or political position regarding
|
||||
them.
|
||||
|
||||
The "Invariant Sections" are certain Secondary Sections whose titles
|
||||
are designated, as being those of Invariant Sections, in the notice
|
||||
that says that the Document is released under this License. If a
|
||||
section does not fit the above definition of Secondary then it is not
|
||||
allowed to be designated as Invariant. The Document may contain zero
|
||||
Invariant Sections. If the Document does not identify any Invariant
|
||||
Sections then there are none.
|
||||
|
||||
The "Cover Texts" are certain short passages of text that are listed,
|
||||
as Front-Cover Texts or Back-Cover Texts, in the notice that says that
|
||||
the Document is released under this License. A Front-Cover Text may
|
||||
be at most 5 words, and a Back-Cover Text may be at most 25 words.
|
||||
|
||||
A "Transparent" copy of the Document means a machine-readable copy,
|
||||
represented in a format whose specification is available to the
|
||||
general public, that is suitable for revising the document
|
||||
straightforwardly with generic text editors or (for images composed of
|
||||
pixels) generic paint programs or (for drawings) some widely available
|
||||
drawing editor, and that is suitable for input to text formatters or
|
||||
for automatic translation to a variety of formats suitable for input
|
||||
to text formatters. A copy made in an otherwise Transparent file
|
||||
format whose markup, or absence of markup, has been arranged to thwart
|
||||
or discourage subsequent modification by readers is not Transparent.
|
||||
An image format is not Transparent if used for any substantial amount
|
||||
of text. A copy that is not "Transparent" is called "Opaque".
|
||||
|
||||
Examples of suitable formats for Transparent copies include plain
|
||||
ASCII without markup, Texinfo input format, LaTeX input format, SGML
|
||||
or XML using a publicly available DTD, and standard-conforming simple
|
||||
HTML, PostScript or PDF designed for human modification. Examples of
|
||||
transparent image formats include PNG, XCF and JPG. Opaque formats
|
||||
include proprietary formats that can be read and edited only by
|
||||
proprietary word processors, SGML or XML for which the DTD and/or
|
||||
processing tools are not generally available, and the
|
||||
machine-generated HTML, PostScript or PDF produced by some word
|
||||
processors for output purposes only.
|
||||
|
||||
The "Title Page" means, for a printed book, the title page itself,
|
||||
plus such following pages as are needed to hold, legibly, the material
|
||||
this License requires to appear in the title page. For works in
|
||||
formats which do not have any title page as such, "Title Page" means
|
||||
the text near the most prominent appearance of the work's title,
|
||||
preceding the beginning of the body of the text.
|
||||
|
||||
A section "Entitled XYZ" means a named subunit of the Document whose
|
||||
title either is precisely XYZ or contains XYZ in parentheses following
|
||||
text that translates XYZ in another language. (Here XYZ stands for a
|
||||
specific section name mentioned below, such as "Acknowledgements",
|
||||
"Dedications", "Endorsements", or "History".) To "Preserve the Title"
|
||||
of such a section when you modify the Document means that it remains a
|
||||
section "Entitled XYZ" according to this definition.
|
||||
|
||||
The Document may include Warranty Disclaimers next to the notice which
|
||||
states that this License applies to the Document. These Warranty
|
||||
Disclaimers are considered to be included by reference in this
|
||||
License, but only as regards disclaiming warranties: any other
|
||||
implication that these Warranty Disclaimers may have is void and has
|
||||
no effect on the meaning of this License.
|
||||
|
||||
|
||||
2. VERBATIM COPYING
|
||||
|
||||
You may copy and distribute the Document in any medium, either
|
||||
commercially or noncommercially, provided that this License, the
|
||||
copyright notices, and the license notice saying this License applies
|
||||
to the Document are reproduced in all copies, and that you add no other
|
||||
conditions whatsoever to those of this License. You may not use
|
||||
technical measures to obstruct or control the reading or further
|
||||
copying of the copies you make or distribute. However, you may accept
|
||||
compensation in exchange for copies. If you distribute a large enough
|
||||
number of copies you must also follow the conditions in section 3.
|
||||
|
||||
You may also lend copies, under the same conditions stated above, and
|
||||
you may publicly display copies.
|
||||
|
||||
|
||||
3. COPYING IN QUANTITY
|
||||
|
||||
If you publish printed copies (or copies in media that commonly have
|
||||
printed covers) of the Document, numbering more than 100, and the
|
||||
Document's license notice requires Cover Texts, you must enclose the
|
||||
copies in covers that carry, clearly and legibly, all these Cover
|
||||
Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on
|
||||
the back cover. Both covers must also clearly and legibly identify
|
||||
you as the publisher of these copies. The front cover must present
|
||||
the full title with all words of the title equally prominent and
|
||||
visible. You may add other material on the covers in addition.
|
||||
Copying with changes limited to the covers, as long as they preserve
|
||||
the title of the Document and satisfy these conditions, can be treated
|
||||
as verbatim copying in other respects.
|
||||
|
||||
If the required texts for either cover are too voluminous to fit
|
||||
legibly, you should put the first ones listed (as many as fit
|
||||
reasonably) on the actual cover, and continue the rest onto adjacent
|
||||
pages.
|
||||
|
||||
If you publish or distribute Opaque copies of the Document numbering
|
||||
more than 100, you must either include a machine-readable Transparent
|
||||
copy along with each Opaque copy, or state in or with each Opaque copy
|
||||
a computer-network location from which the general network-using
|
||||
public has access to download using public-standard network protocols
|
||||
a complete Transparent copy of the Document, free of added material.
|
||||
If you use the latter option, you must take reasonably prudent steps,
|
||||
when you begin distribution of Opaque copies in quantity, to ensure
|
||||
that this Transparent copy will remain thus accessible at the stated
|
||||
location until at least one year after the last time you distribute an
|
||||
Opaque copy (directly or through your agents or retailers) of that
|
||||
edition to the public.
|
||||
|
||||
It is requested, but not required, that you contact the authors of the
|
||||
Document well before redistributing any large number of copies, to give
|
||||
them a chance to provide you with an updated version of the Document.
|
||||
|
||||
|
||||
4. MODIFICATIONS
|
||||
|
||||
You may copy and distribute a Modified Version of the Document under
|
||||
the conditions of sections 2 and 3 above, provided that you release
|
||||
the Modified Version under precisely this License, with the Modified
|
||||
Version filling the role of the Document, thus licensing distribution
|
||||
and modification of the Modified Version to whoever possesses a copy
|
||||
of it. In addition, you must do these things in the Modified Version:
|
||||
|
||||
A. Use in the Title Page (and on the covers, if any) a title distinct
|
||||
from that of the Document, and from those of previous versions
|
||||
(which should, if there were any, be listed in the History section
|
||||
of the Document). You may use the same title as a previous version
|
||||
if the original publisher of that version gives permission.
|
||||
B. List on the Title Page, as authors, one or more persons or entities
|
||||
responsible for authorship of the modifications in the Modified
|
||||
Version, together with at least five of the principal authors of the
|
||||
Document (all of its principal authors, if it has fewer than five),
|
||||
unless they release you from this requirement.
|
||||
C. State on the Title page the name of the publisher of the
|
||||
Modified Version, as the publisher.
|
||||
D. Preserve all the copyright notices of the Document.
|
||||
E. Add an appropriate copyright notice for your modifications
|
||||
adjacent to the other copyright notices.
|
||||
F. Include, immediately after the copyright notices, a license notice
|
||||
giving the public permission to use the Modified Version under the
|
||||
terms of this License, in the form shown in the Addendum below.
|
||||
G. Preserve in that license notice the full lists of Invariant Sections
|
||||
and required Cover Texts given in the Document's license notice.
|
||||
H. Include an unaltered copy of this License.
|
||||
I. Preserve the section Entitled "History", Preserve its Title, and add
|
||||
to it an item stating at least the title, year, new authors, and
|
||||
publisher of the Modified Version as given on the Title Page. If
|
||||
there is no section Entitled "History" in the Document, create one
|
||||
stating the title, year, authors, and publisher of the Document as
|
||||
given on its Title Page, then add an item describing the Modified
Version as stated in the previous sentence.
J. Preserve the network location, if any, given in the Document for
public access to a Transparent copy of the Document, and likewise
the network locations given in the Document for previous versions
it was based on. These may be placed in the "History" section.
You may omit a network location for a work that was published at
least four years before the Document itself, or if the original
publisher of the version it refers to gives permission.
K. For any section Entitled "Acknowledgements" or "Dedications",
Preserve the Title of the section, and preserve in the section all
the substance and tone of each of the contributor acknowledgements
and/or dedications given therein.
L. Preserve all the Invariant Sections of the Document,
unaltered in their text and in their titles. Section numbers
or the equivalent are not considered part of the section titles.
M. Delete any section Entitled "Endorsements". Such a section
may not be included in the Modified Version.
N. Do not retitle any existing section to be Entitled "Endorsements"
or to conflict in title with any Invariant Section.
O. Preserve any Warranty Disclaimers.

If the Modified Version includes new front-matter sections or
appendices that qualify as Secondary Sections and contain no material
copied from the Document, you may at your option designate some or all
of these sections as invariant. To do this, add their titles to the
list of Invariant Sections in the Modified Version's license notice.
These titles must be distinct from any other section titles.

You may add a section Entitled "Endorsements", provided it contains
nothing but endorsements of your Modified Version by various
parties--for example, statements of peer review or that the text has
been approved by an organization as the authoritative definition of a
standard.

You may add a passage of up to five words as a Front-Cover Text, and a
passage of up to 25 words as a Back-Cover Text, to the end of the list
of Cover Texts in the Modified Version. Only one passage of
Front-Cover Text and one of Back-Cover Text may be added by (or
through arrangements made by) any one entity. If the Document already
includes a cover text for the same cover, previously added by you or
by arrangement made by the same entity you are acting on behalf of,
you may not add another; but you may replace the old one, on explicit
permission from the previous publisher that added the old one.

The author(s) and publisher(s) of the Document do not by this License
give permission to use their names for publicity for or to assert or
imply endorsement of any Modified Version.


5. COMBINING DOCUMENTS

You may combine the Document with other documents released under this
License, under the terms defined in section 4 above for modified
versions, provided that you include in the combination all of the
Invariant Sections of all of the original documents, unmodified, and
list them all as Invariant Sections of your combined work in its
license notice, and that you preserve all their Warranty Disclaimers.

The combined work need only contain one copy of this License, and
multiple identical Invariant Sections may be replaced with a single
copy. If there are multiple Invariant Sections with the same name but
different contents, make the title of each such section unique by
adding at the end of it, in parentheses, the name of the original
author or publisher of that section if known, or else a unique number.
Make the same adjustment to the section titles in the list of
Invariant Sections in the license notice of the combined work.

In the combination, you must combine any sections Entitled "History"
in the various original documents, forming one section Entitled
"History"; likewise combine any sections Entitled "Acknowledgements",
and any sections Entitled "Dedications". You must delete all sections
Entitled "Endorsements".


6. COLLECTIONS OF DOCUMENTS

You may make a collection consisting of the Document and other documents
released under this License, and replace the individual copies of this
License in the various documents with a single copy that is included in
the collection, provided that you follow the rules of this License for
verbatim copying of each of the documents in all other respects.

You may extract a single document from such a collection, and distribute
it individually under this License, provided you insert a copy of this
License into the extracted document, and follow this License in all
other respects regarding verbatim copying of that document.


7. AGGREGATION WITH INDEPENDENT WORKS

A compilation of the Document or its derivatives with other separate
and independent documents or works, in or on a volume of a storage or
distribution medium, is called an "aggregate" if the copyright
resulting from the compilation is not used to limit the legal rights
of the compilation's users beyond what the individual works permit.
When the Document is included in an aggregate, this License does not
apply to the other works in the aggregate which are not themselves
derivative works of the Document.

If the Cover Text requirement of section 3 is applicable to these
copies of the Document, then if the Document is less than one half of
the entire aggregate, the Document's Cover Texts may be placed on
covers that bracket the Document within the aggregate, or the
electronic equivalent of covers if the Document is in electronic form.
Otherwise they must appear on printed covers that bracket the whole
aggregate.


8. TRANSLATION

Translation is considered a kind of modification, so you may
distribute translations of the Document under the terms of section 4.
Replacing Invariant Sections with translations requires special
permission from their copyright holders, but you may include
translations of some or all Invariant Sections in addition to the
original versions of these Invariant Sections. You may include a
translation of this License, and all the license notices in the
Document, and any Warranty Disclaimers, provided that you also include
the original English version of this License and the original versions
of those notices and disclaimers. In case of a disagreement between
the translation and the original version of this License or a notice
or disclaimer, the original version will prevail.

If a section in the Document is Entitled "Acknowledgements",
"Dedications", or "History", the requirement (section 4) to Preserve
its Title (section 1) will typically require changing the actual
title.


9. TERMINATION

You may not copy, modify, sublicense, or distribute the Document except
as expressly provided for under this License. Any other attempt to
copy, modify, sublicense or distribute the Document is void, and will
automatically terminate your rights under this License. However,
parties who have received copies, or rights, from you under this
License will not have their licenses terminated so long as such
parties remain in full compliance.


10. FUTURE REVISIONS OF THIS LICENSE

The Free Software Foundation may publish new, revised versions
of the GNU Free Documentation License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns. See
https://www.gnu.org/licenses/.

Each version of the License is given a distinguishing version number.
If the Document specifies that a particular numbered version of this
License "or any later version" applies to it, you have the option of
following the terms and conditions either of that specified version or
of any later version that has been published (not as a draft) by the
Free Software Foundation. If the Document does not specify a version
number of this License, you may choose any version ever published (not
as a draft) by the Free Software Foundation.


ADDENDUM: How to use this License for your documents

To use this License in a document you have written, include a copy of
the License in the document and put the following copyright and
license notices just after the title page:

    Copyright (c) YEAR YOUR NAME.
    Permission is granted to copy, distribute and/or modify this document
    under the terms of the GNU Free Documentation License, Version 1.2
    or any later version published by the Free Software Foundation;
    with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
    A copy of the license is included in the section entitled "GNU
    Free Documentation License".

If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts,
replace the "with...Texts." line with this:

    with the Invariant Sections being LIST THEIR TITLES, with the
    Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST.

If you have Invariant Sections without Cover Texts, or some other
combination of the three, merge those two alternatives to suit the
situation.

If your document contains nontrivial examples of program code, we
recommend releasing these examples in parallel under your choice of
free software license, such as the GNU General Public License,
to permit their use in free software.
11
third_party/rust/procfs-core/Cargo.toml
vendored
@@ -13,8 +13,13 @@
edition = "2018"
rust-version = "1.48"
name = "procfs-core"
version = "0.16.0"
version = "0.17.0"
authors = ["Andrew Chin <achin@eminence32.net>"]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Data structures and parsing for the linux procfs pseudo-filesystem"
documentation = "https://docs.rs/procfs-core/"
readme = "README.md"
@@ -34,6 +39,10 @@ repository = "https://github.com/eminence/procfs"
[package.metadata.docs.rs]
all-features = true

[lib]
name = "procfs_core"
path = "src/lib.rs"

[dependencies.backtrace]
version = "0.3"
optional = true
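For context, this hunk only touches the vendored crate's own manifest: the version moves from 0.16.0 to 0.17.0, and the generated Cargo.toml now spells out the [lib] target and the optional backtrace dependency. A downstream crate that wants the bumped version would carry an entry along these lines; this is an illustrative sketch only, as the actual dependents are not shown in this hunk:

    # Hypothetical consumer manifest entry (not part of this diff).
    [dependencies]
    procfs-core = "0.17"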