Bug 1968156 - Vendor application-services 8986582 for suggest geonames and weather changes. r=daisuke,supply-chain-reviewers

This vendors this revision:
8986582d37

It also makes some desktop fixes for breaking changes in Suggest, which
@daisuke previously reviewed. It's a large vendor because it pulls in some new
crates along with app-services revisions that changed logging and error
reporting and touched lots of files.

Differential Revision: https://phabricator.services.mozilla.com/D250877
Author: Drew Willcoxon
Committer: dwillcoxon@mozilla.com
Date: 2025-05-23 19:38:50 +00:00
Parent: 69b24b2ce3
Commit: 3d0a207aee

167 changed files with 48484 additions and 2337 deletions

View File

@@ -70,9 +70,9 @@ git = "https://github.com/jfkthame/mapped_hyph.git"
rev = "eff105f6ad7ec9b79816cfc1985a28e5340ad14b" rev = "eff105f6ad7ec9b79816cfc1985a28e5340ad14b"
replace-with = "vendored-sources" replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285"] [source."git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424"]
git = "https://github.com/mozilla/application-services" git = "https://github.com/mozilla/application-services"
rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" rev = "8986582d377eac7b64bddec8aa9c5ce06b161424"
replace-with = "vendored-sources" replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/audioipc?rev=e6f44a2bd1e57d11dfc737632a9e849077632330"] [source."git+https://github.com/mozilla/audioipc?rev=e6f44a2bd1e57d11dfc737632a9e849077632330"]

Cargo.lock (generated)
View File

@@ -973,12 +973,11 @@ dependencies = [
 [[package]]
 name = "context_id"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "chrono",
  "error-support",
  "lazy_static",
- "log",
  "parking_lot",
  "serde",
  "serde_json",

@@ -1913,8 +1912,9 @@ dependencies = [
 [[package]]
 name = "error-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
+ "env_logger",
  "error-support-macros",
  "lazy_static",
  "log",

@@ -1925,7 +1925,7 @@ dependencies = [
 [[package]]
 name = "error-support-macros"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "proc-macro2",
  "quote",

@@ -2044,7 +2044,7 @@ dependencies = [
 [[package]]
 name = "firefox-versioning"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "serde_json",
  "thiserror 1.999.999",

@@ -3385,7 +3385,7 @@ dependencies = [
 [[package]]
 name = "interrupt-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "lazy_static",
  "parking_lot",

@@ -5108,7 +5108,7 @@ checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
 [[package]]
 name = "payload-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "serde",
  "serde_derive",

@@ -5615,13 +5615,12 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
 [[package]]
 name = "relevancy"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "anyhow",
  "base64 0.21.999",
  "error-support",
  "interrupt-support",
- "log",
  "md-5",
  "parking_lot",
  "rand",

@@ -5640,14 +5639,13 @@ dependencies = [
 [[package]]
 name = "remote_settings"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "anyhow",
  "camino",
  "error-support",
  "firefox-versioning",
  "jexl-eval",
- "log",
  "parking_lot",
  "regex",
  "rusqlite 0.33.0",

@@ -5933,7 +5931,7 @@ dependencies = [
 [[package]]
 name = "search"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "error-support",
  "firefox-versioning",

@@ -6225,11 +6223,11 @@ dependencies = [
 [[package]]
 name = "sql-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
+ "error-support",
  "interrupt-support",
  "lazy_static",
- "log",
  "parking_lot",
  "rusqlite 0.33.0",
  "tempfile",

@@ -6431,14 +6429,13 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
 [[package]]
 name = "suggest"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "anyhow",
  "chrono",
  "error-support",
  "extend",
  "interrupt-support",
- "log",
  "once_cell",
  "parking_lot",
  "remote_settings",

@@ -6448,6 +6445,8 @@ dependencies = [
  "serde_json",
  "sql-support",
  "thiserror 1.999.999",
+ "unicase",
+ "unicode-normalization",
  "uniffi",
  "url",
  "viaduct",

@@ -6483,7 +6482,7 @@ dependencies = [
 [[package]]
 name = "sync-guid"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "base64 0.21.999",
  "rand",

@@ -6494,13 +6493,12 @@ dependencies = [
 [[package]]
 name = "sync15"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "anyhow",
  "error-support",
  "interrupt-support",
  "lazy_static",
- "log",
  "payload-support",
  "serde",
  "serde_derive",

@@ -6534,13 +6532,12 @@ dependencies = [
 [[package]]
 name = "tabs"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "anyhow",
  "error-support",
  "interrupt-support",
  "lazy_static",
- "log",
  "payload-support",
  "rusqlite 0.33.0",
  "serde",

@@ -6694,6 +6691,21 @@ dependencies = [
  "zerovec",
 ]

+[[package]]
+name = "tinyvec"
+version = "1.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71"
+dependencies = [
+ "tinyvec_macros",
+]
+
+[[package]]
+name = "tinyvec_macros"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
+
 [[package]]
 name = "to_shmem"
 version = "0.1.0"

@@ -6878,7 +6890,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
 [[package]]
 name = "types"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "rusqlite 0.33.0",
  "serde",

@@ -6953,6 +6965,15 @@ version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"

+[[package]]
+name = "unicode-normalization"
+version = "0.1.24"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
+dependencies = [
+ "tinyvec",
+]
+
 [[package]]
 name = "unicode-width"
 version = "0.1.999"

@@ -7281,10 +7302,10 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 [[package]]
 name = "viaduct"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
+ "error-support",
  "ffi-support",
- "log",
  "once_cell",
  "parking_lot",
  "prost",

@@ -7450,14 +7471,13 @@ dependencies = [
 [[package]]
 name = "webext-storage"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=fa8a72a77f88bc8b3743b50d76fb85cb37a38285#fa8a72a77f88bc8b3743b50d76fb85cb37a38285"
+source = "git+https://github.com/mozilla/application-services?rev=8986582d377eac7b64bddec8aa9c5ce06b161424#8986582d377eac7b64bddec8aa9c5ce06b161424"
 dependencies = [
  "anyhow",
  "error-support",
  "ffi-support",
  "interrupt-support",
  "lazy_static",
- "log",
  "parking_lot",
  "rusqlite 0.33.0",
  "serde",

View File

@@ -265,15 +265,15 @@ wr_malloc_size_of = { path = "gfx/wr/wr_malloc_size_of" }
 objc = { git = "https://github.com/glandium/rust-objc", rev = "4de89f5aa9851ceca4d40e7ac1e2759410c04324" }

 # application-services overrides to make updating them all simpler.
-context_id = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+context_id = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-relevancy = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+relevancy = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-search = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+search = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-sql-support = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+sql-support = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-suggest = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+suggest = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-sync15 = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+sync15 = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-tabs = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+tabs = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-viaduct = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+viaduct = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
-webext-storage = { git = "https://github.com/mozilla/application-services", rev = "fa8a72a77f88bc8b3743b50d76fb85cb37a38285" }
+webext-storage = { git = "https://github.com/mozilla/application-services", rev = "8986582d377eac7b64bddec8aa9c5ce06b161424" }
 allocator-api2 = { path = "third_party/rust/allocator-api2" }

View File

@@ -99,10 +99,12 @@ class _GeolocationUtils {
    * `locationFromItem(item)` and it should return an object with the
    * following properties, all optional:
    *
-   * {number} latitude
-   *   The location's latitude in decimal coordinates.
-   * {number} longitude
-   *   The location's longitude in decimal coordinates.
+   * {String|number} latitude
+   *   The location's latitude in decimal coordinates as either a string or
+   *   float.
+   * {String|number} longitude
+   *   The location's longitude in decimal coordinates as either a string or
+   *   float.
    * {string} country
    *   The location's two-digit ISO country code. Case doesn't matter.
    * {string} region

@@ -158,9 +160,9 @@ class _GeolocationUtils {
    * `geo` does not include a location or coordinates, null is returned.
    */
   #bestByDistance(geo, items, locationFromItem) {
-    let geoLat = geo.location?.latitude;
-    let geoLong = geo.location?.longitude;
-    if (typeof geoLat != "number" || typeof geoLong != "number") {
+    let geoLat = parseFloat(geo.location?.latitude);
+    let geoLong = parseFloat(geo.location?.longitude);
+    if (isNaN(geoLat) || isNaN(geoLong)) {
       return null;
     }

@@ -174,15 +176,17 @@ class _GeolocationUtils {
     let dMin = Infinity;
     for (let item of items) {
       let location = locationFromItem(item);
-      if (
-        typeof location.latitude != "number" ||
-        typeof location.longitude != "number"
-      ) {
+      if (!location) {
         continue;
       }
-      let [itemLat, itemLong] = [location.latitude, location.longitude].map(
-        toRadians
-      );
+
+      let locationLat = parseFloat(location.latitude);
+      let locationLong = parseFloat(location.longitude);
+      if (isNaN(locationLat) || isNaN(locationLong)) {
+        continue;
+      }
+      let [itemLat, itemLong] = [locationLat, locationLong].map(toRadians);
       let d =
         EARTH_RADIUS_KM *
         Math.acos(

@@ -244,7 +248,7 @@ class _GeolocationUtils {
     let bestRegionTuple;
     for (let item of items) {
       let location = locationFromItem(item);
-      if (location.country?.toLowerCase() == geoCountry) {
+      if (location?.country?.toLowerCase() == geoCountry) {
         if (
           !bestCountryTuple ||
           hasLargerPopulation(location, bestCountryTuple.location)
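The practical effect of these changes is that coordinates may now arrive as strings (geonames store them that way), and an item whose callback returns nothing is simply skipped. A minimal usage sketch, assuming the `GeolocationUtils.best(items, locationFromItem)` entry point used by the weather and Yelp changes in this commit; the `cities` item shape is made up for illustration:

// Hypothetical items; coordinates intentionally given as strings.
const cities = [
  { name: "Waterloo", lat: "34.91814", long: "-88.0642", region: "AL", pop: 200 },
  { name: "Waterloo", lat: "42.49276", long: "-92.34296", region: "IA", pop: 68460 },
];

let best = await GeolocationUtils.best(cities, city => ({
  // String coordinates are fine now: #bestByDistance runs them through
  // parseFloat() and skips items whose coordinates don't parse.
  latitude: city.lat,
  longitude: city.long,
  country: "US",
  region: city.region,
  population: city.pop,
}));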

View File

@@ -197,7 +197,15 @@ export class WeatherSuggestions extends SuggestProvider {
     if (suggestions.length <= 1) {
       return suggestions;
     }
-    let suggestion = await lazy.GeolocationUtils.best(suggestions);
+
+    let suggestion = await lazy.GeolocationUtils.best(suggestions, s => ({
+      latitude: s.city?.latitude,
+      longitude: s.city?.longitude,
+      country: s.city?.countryCode,
+      region: s.city?.adminDivisionCodes.get(1),
+      population: s.city?.population,
+    }));
+
     return [suggestion];
   }

@@ -215,9 +223,16 @@ export class WeatherSuggestions extends SuggestProvider {
     // Set up location params to pass to Merino. We need to null-check each
     // suggestion property because `MerinoClient` will stringify null values.
     let otherParams = {};
-    for (let key of ["city", "region", "country"]) {
-      if (suggestion[key]) {
-        otherParams[key] = suggestion[key];
+    if (suggestion.city) {
+      if (suggestion.city.name) {
+        otherParams.city = suggestion.city.name;
+      }
+      if (suggestion.city.countryCode) {
+        otherParams.country = suggestion.city.countryCode;
+      }
+      let admin1Code = suggestion.city.adminDivisionCodes.get(1);
+      if (admin1Code) {
+        otherParams.region = admin1Code;
       }
     }
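The breaking change here is that a weather suggestion no longer carries flat `city`/`region`/`country` strings; location data now hangs off a `city` object. A hedged sketch of the mapping, with a hypothetical suggestion shaped like the fields the new code reads:

// Hypothetical Rust weather suggestion, shaped like the fields used above.
let suggestion = {
  city: {
    name: "Waterloo",
    countryCode: "US",
    // Admin division level 1 is the state/region-level code.
    adminDivisionCodes: new Map([[1, "IA"]]),
  },
};

// Null-check each field; MerinoClient would stringify null values.
let otherParams = {};
if (suggestion.city?.name) {
  otherParams.city = suggestion.city.name;
}
if (suggestion.city?.countryCode) {
  otherParams.country = suggestion.city.countryCode;
}
let admin1Code = suggestion.city?.adminDivisionCodes.get(1);
if (admin1Code) {
  otherParams.region = admin1Code;
}
// otherParams is now { city: "Waterloo", country: "US", region: "IA" }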

View File

@@ -11,8 +11,6 @@ ChromeUtils.defineESModuleGetters(lazy, {
     "resource:///modules/urlbar/private/GeolocationUtils.sys.mjs",
   GeonameMatchType:
     "moz-src:///toolkit/components/uniffi-bindgen-gecko-js/components/generated/RustSuggest.sys.mjs",
-  GeonameType:
-    "moz-src:///toolkit/components/uniffi-bindgen-gecko-js/components/generated/RustSuggest.sys.mjs",
   QuickSuggest: "resource:///modules/QuickSuggest.sys.mjs",
   UrlbarPrefs: "resource:///modules/UrlbarPrefs.sys.mjs",
   UrlbarResult: "resource:///modules/UrlbarResult.sys.mjs",

@@ -495,8 +493,7 @@ export class YelpSuggestions extends SuggestProvider {
     regionMatches = await lazy.QuickSuggest.rustBackend.fetchGeonames(
       region,
       false, // prefix matching
-      lazy.GeonameType.REGION,
-      null
+      null // geonames filter array
     );
     if (!regionMatches.length) {
       // The user typed something we thought was a region but isn't, so assume

@@ -509,7 +506,6 @@ export class YelpSuggestions extends SuggestProvider {
     let cityMatches = await lazy.QuickSuggest.rustBackend.fetchGeonames(
       city,
       true, // prefix matching
-      lazy.GeonameType.CITY,
       regionMatches?.map(m => m.geoname)
     );
     // Discard prefix matches on any names that aren't full names, i.e., on

@@ -530,7 +526,10 @@ export class YelpSuggestions extends SuggestProvider {
       cityMatches,
       locationFromGeonameMatch
     );
-    return { city: best.geoname.name, region: best.geoname.admin1Code };
+    return {
+      city: best.geoname.name,
+      region: best.geoname.adminDivisionCodes.get(1),
+    };
   }

   // We didn't detect a city in the query but we detected a region, so try to

@@ -574,7 +573,7 @@ function locationFromGeonameMatch(match) {
     latitude: match.geoname.latitude,
     longitude: match.geoname.longitude,
     country: match.geoname.countryCode,
-    region: match.geoname.admin1Code,
+    region: match.geoname.adminDivisionCodes.get(1),
     population: match.geoname.population,
   };
}
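The other breaking change is to `fetchGeonames()`: the `GeonameType` argument is gone, and the last parameter is now an optional array of geonames to filter against. A sketch of the two-step lookup the new code performs; the query strings here are hypothetical examples:

// Look up candidate regions first (no prefix matching, no filter)...
let regionMatches = await lazy.QuickSuggest.rustBackend.fetchGeonames(
  "ia",
  false, // prefix matching
  null // geonames filter array
);

// ...then find cities, prefix-matched and filtered to those regions.
let cityMatches = await lazy.QuickSuggest.rustBackend.fetchGeonames(
  "waterloo",
  true, // prefix matching
  regionMatches?.map(m => m.geoname)
);

// Region codes now come from `adminDivisionCodes` instead of `admin1Code`.
let regionCode = cityMatches[0]?.geoname.adminDivisionCodes.get(1);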

View File

@@ -564,10 +564,6 @@ class MockMerinoServer {
       "MockMerinoServer received request with query string: " +
         JSON.stringify(httpRequest.queryString)
     );
-    this.info(
-      "MockMerinoServer replying with response: " +
-        JSON.stringify(this.response)
-    );

     // Add the request to the list of received requests.
     this.#requests.push(httpRequest);

@@ -581,6 +577,10 @@ class MockMerinoServer {
     let response = this.#requestHandler?.(httpRequest) || this.response;
+    this.info(
+      "MockMerinoServer replying with response: " + JSON.stringify(response)
+    );
+
     let finishResponse = () => {
       let status = response.status || 200;
       httpResponse.setStatusLine("", status, status);

View File

@@ -658,32 +658,24 @@ class _QuickSuggestTestUtils {
     min_keyword_length = undefined,
     score = 0.29,
   } = {}) {
-    let [maxLen, maxWordCount] = keywords.reduce(
-      ([len, wordCount], kw) => [
-        Math.max(len, kw.length),
-        Math.max(wordCount, kw.split(/\s+/).filter(s => !!s).length),
-      ],
-      [0, 0]
-    );
     return {
       type: "weather",
       attachment: {
         keywords,
         min_keyword_length,
         score,
-        max_keyword_length: maxLen,
-        max_keyword_word_count: maxWordCount,
       },
     };
   }

   /**
-   * Returns a remote settings geonames record populated with some cities.
+   * Returns remote settings records containing geonames populated with some
+   * cities.
    *
-   * @returns {object}
-   *   A geonames record for storing in remote settings.
+   * @returns {Array}
+   *   One or more geonames records for storing in remote settings.
    */
-  geonamesRecord() {
+  geonamesRecords() {
     let geonames = [
       // Waterloo, AL
       {

@@ -693,11 +685,9 @@ class _QuickSuggestTestUtils {
         longitude: "-88.0642",
         feature_class: "P",
         feature_code: "PPL",
-        country_code: "US",
-        admin1_code: "AL",
+        country: "US",
+        admin1: "AL",
         population: 200,
-        alternate_names: ["waterloo"],
-        alternate_names_2: [{ name: "waterloo" }],
       },
       // AL
       {

@@ -707,14 +697,9 @@ class _QuickSuggestTestUtils {
         longitude: "-86.75026",
         feature_class: "A",
         feature_code: "ADM1",
-        country_code: "US",
-        admin1_code: "AL",
+        country: "US",
+        admin1: "AL",
         population: 4530315,
-        alternate_names: ["al", "alabama"],
-        alternate_names_2: [
-          { name: "alabama" },
-          { name: "al", iso_language: "abbr" },
-        ],
       },
       // Waterloo, IA
       {

@@ -724,11 +709,9 @@ class _QuickSuggestTestUtils {
         longitude: "-92.34296",
         feature_class: "P",
         feature_code: "PPLA2",
-        country_code: "US",
-        admin1_code: "IA",
+        country: "US",
+        admin1: "IA",
         population: 68460,
-        alternate_names: ["waterloo"],
-        alternate_names_2: [{ name: "waterloo" }],
       },
       // IA
       {

@@ -738,14 +721,9 @@ class _QuickSuggestTestUtils {
         longitude: "-93.50049",
         feature_class: "A",
         feature_code: "ADM1",
-        country_code: "US",
-        admin1_code: "IA",
+        country: "US",
+        admin1: "IA",
         population: 2955010,
-        alternate_names: ["ia", "iowa"],
-        alternate_names_2: [
-          { name: "iowa" },
-          { name: "ia", iso_language: "abbr" },
-        ],
       },
       // Made-up cities with the same name in the US and CA. The CA city has a
       // larger population.

@@ -756,11 +734,9 @@ class _QuickSuggestTestUtils {
         longitude: "-97.92977",
         feature_class: "P",
         feature_code: "PPL",
-        country_code: "US",
-        admin1_code: "IA",
+        country: "US",
+        admin1: "IA",
         population: 1,
-        alternate_names: ["us ca city"],
-        alternate_names_2: [{ name: "us ca city" }],
       },
       {
         id: 101,

@@ -769,11 +745,9 @@ class _QuickSuggestTestUtils {
         longitude: "-73.58781",
         feature_class: "P",
         feature_code: "PPL",
-        country_code: "CA",
-        admin1_code: "08",
+        country: "CA",
+        admin1: "08",
         population: 2,
-        alternate_names: ["us ca city"],
-        alternate_names_2: [{ name: "us ca city" }],
       },
       // Made-up cities that are only ~1.5 km apart.
       {

@@ -783,11 +757,9 @@ class _QuickSuggestTestUtils {
         longitude: "-84.39",
         feature_class: "P",
         feature_code: "PPL",
-        country_code: "US",
-        admin1_code: "GA",
+        country: "US",
+        admin1: "GA",
         population: 1,
-        alternate_names: ["twin city a"],
-        alternate_names_2: [{ name: "twin city a" }],
       },
       {
         id: 103,

@@ -796,11 +768,9 @@ class _QuickSuggestTestUtils {
         longitude: "-84.4",
         feature_class: "P",
         feature_code: "PPL",
-        country_code: "US",
-        admin1_code: "GA",
+        country: "US",
+        admin1: "GA",
         population: 2,
-        alternate_names: ["twin city b"],
-        alternate_names_2: [{ name: "twin city b" }],
       },
       {
         id: 1850147,

@@ -809,33 +779,42 @@ class _QuickSuggestTestUtils {
         longitude: "139.69171",
         feature_class: "P",
         feature_code: "PPLC",
-        country_code: "JP",
-        admin1_code: "Tokyo-to",
+        country: "JP",
+        admin1: "Tokyo-to",
         population: 8336599,
-        alternate_names: ["tokyo"],
-        alternate_names_2: [{ name: "tokyo" }],
       },
     ];
-    let [maxLen, maxWordCount] = geonames.reduce(
-      ([len, wordCount], geoname) => [
-        Math.max(len, ...geoname.alternate_names.map(n => n.length)),
-        Math.max(
-          wordCount,
-          ...geoname.alternate_names.map(
-            n => n.split(/\s+/).filter(s => !!s).length
-          )
-        ),
-      ],
-      [0, 0]
-    );
-    return {
-      type: "geonames",
-      attachment: {
-        geonames,
-        max_alternate_name_length: maxLen,
-        max_alternate_name_word_count: maxWordCount,
-      },
-    };
+
+    return [
+      {
+        type: "geonames-2",
+        attachment: geonames,
+      },
+    ];
+  }
+
+  /**
+   * Returns remote settings records containing geonames alternates (alternate
+   * names) populated with some names.
+   *
+   * @returns {Array}
+   *   One or more geonames alternates records for storing in remote settings.
+   */
+  geonamesAlternatesRecords() {
+    return [
+      {
+        type: "geonames-alternates",
+        attachment: [
+          {
+            language: "abbr",
+            alternates_by_geoname_id: [
+              [2, ["AL"]],
+              [4, ["IA"]],
+            ],
+          },
+        ],
+      },
+    ];
   }

   /**
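Taken together, the new fixtures split geonames data across two remote settings record types: `geonames-2` records hold the geoname objects themselves, and `geonames-alternates` records hold alternate names grouped by language and keyed by geoname id (in the mock data, id 2 is the Alabama ADM1 entry and id 4 is Iowa's). Because both helpers now return arrays, call sites spread them, as in this sketch mirroring the updated tests further below:

// Seeding the mock remote settings server with both record types.
const REMOTE_SETTINGS_RECORDS = [
  QuickSuggestTestUtils.weatherRecord(),
  ...QuickSuggestTestUtils.geonamesRecords(),
  ...QuickSuggestTestUtils.geonamesAlternatesRecords(),
];
await QuickSuggestTestUtils.setRemoteSettingsRecords(REMOTE_SETTINGS_RECORDS);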

View File

@@ -497,14 +497,17 @@ export class RemoteSettingsServer {
   async #addAttachment({ bucket, collection, record }) {
     let { attachment } = record;
-    let bytes;
-    if (attachment instanceof Array) {
-      bytes = Uint8Array.from(attachment);
-    } else {
-      let encoder = new TextEncoder();
-      bytes = encoder.encode(JSON.stringify(attachment));
-    }
+    let mimetype =
+      record.attachmentMimetype ?? "application/json; charset=UTF-8";
+    if (!mimetype.startsWith("application/json")) {
+      throw new Error(
+        "Mimetype not handled, please add code for it! " + mimetype
+      );
+    }
+
+    let encoder = new TextEncoder();
+    let bytes = encoder.encode(JSON.stringify(attachment));

     let hashBuffer = await crypto.subtle.digest("SHA-256", bytes);
     let hashBytes = new Uint8Array(hashBuffer);
     let toHex = b => b.toString(16).padStart(2, "0");

@@ -521,7 +524,7 @@ export class RemoteSettingsServer {
     record.attachment = {
       hash,
       filename,
-      mimetype: record.attachmentMimetype ?? "application/json; charset=UTF-8",
+      mimetype,
       size: bytes.length,
       location: `attachments/${bucket}/${collection}/${filename}`,
     };
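After this change the mock server only serves JSON attachments and fails fast on anything else, instead of silently encoding byte arrays. A sketch using the record shapes seen in the tests this patch touches:

// Fine: a JSON attachment, serialized via TextEncoder + JSON.stringify.
let jsonRecord = {
  type: "fakespot-suggestions",
  attachment: [{ title: "Some product" }],
};

// Throws "Mimetype not handled, please add code for it! image/png":
// byte-array (binary) attachments are no longer supported.
let binaryRecord = {
  type: "icon",
  attachmentMimetype: "image/png",
  attachment: [1, 2, 3],
};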

View File

@@ -9,18 +9,6 @@
 requestLongerTimeout(5);

 const REMOTE_SETTINGS_RECORDS = [
-  {
-    type: "icon",
-    id: "icon-fakespot-amazon",
-    attachmentMimetype: "image/png",
-    attachment: [1, 2, 3],
-  },
-  {
-    type: "icon",
-    id: "icon-fakespot-bestbuy",
-    attachmentMimetype: "image/svg+xml",
-    attachment: [4, 5, 6],
-  },
   {
     collection: "fakespot-suggest-products",
     type: "fakespot-suggestions",

@@ -576,52 +564,3 @@ add_task(async function ratingAndTotalReviewsLabel() {
     await UrlbarTestUtils.promisePopupClose(window);
   }
 });
-
-// Test the icons.
-add_task(async function icons() {
-  const testData = [
-    {
-      input: "png image",
-      expectedIcon: REMOTE_SETTINGS_RECORDS.find(
-        r => r.id == "icon-fakespot-amazon"
-      ),
-    },
-    {
-      input: "svg image",
-      expectedIcon: REMOTE_SETTINGS_RECORDS.find(
-        r => r.id == "icon-fakespot-bestbuy"
-      ),
-    },
-    { input: "no image", expectedIcon: null },
-  ];
-
-  for (const { input, expectedIcon } of testData) {
-    await UrlbarTestUtils.promiseAutocompleteResultPopup({
-      window,
-      value: input,
-    });
-    Assert.equal(UrlbarTestUtils.getResultCount(window), 2);
-
-    const { element } = await UrlbarTestUtils.getDetailsOfResultAt(window, 1);
-    const src = element.row.querySelector(
-      ".urlbarView-dynamic-fakespot-icon"
-    ).src;
-
-    if (!expectedIcon) {
-      Assert.equal(src, "");
-      return;
-    }
-
-    const content = await fetch(src);
-    const blob = await content.blob();
-    const bytes = await blob.bytes();
-    Assert.equal(blob.type, expectedIcon.attachmentMimetype);
-    Assert.equal(
-      new TextDecoder().decode(bytes),
-      JSON.stringify(expectedIcon.attachment)
-    );
-
-    await UrlbarTestUtils.promisePopupClose(window);
-  }
-});

View File

@@ -19,7 +19,8 @@ const REMOTE_SETTINGS_RECORDS = [
       score: 0.5,
     },
   },
-  QuickSuggestTestUtils.geonamesRecord(),
+  ...QuickSuggestTestUtils.geonamesRecords(),
+  ...QuickSuggestTestUtils.geonamesAlternatesRecords(),
 ];

 add_setup(async function () {

View File

@@ -22,7 +22,8 @@ const REMOTE_SETTINGS_RECORDS = [
       score: 0.5,
     },
   },
-  QuickSuggestTestUtils.geonamesRecord(),
+  ...QuickSuggestTestUtils.geonamesRecords(),
+  ...QuickSuggestTestUtils.geonamesAlternatesRecords(),
 ];

 const TOKYO_RESULT = {

View File

@@ -25,7 +25,8 @@ const REMOTE_SETTINGS_RECORDS = [
       score: 0.5,
     },
   },
-  QuickSuggestTestUtils.geonamesRecord(),
+  ...QuickSuggestTestUtils.geonamesRecords(),
+  ...QuickSuggestTestUtils.geonamesAlternatesRecords(),
 ];

 const WATERLOO_RESULT = {

@@ -383,7 +384,8 @@ add_task(async function cache_fromRust() {
 // Rust suggestion is not present in remote settings.
 add_task(async function cache_defaultValues() {
   await QuickSuggestTestUtils.setRemoteSettingsRecords([
-    QuickSuggestTestUtils.geonamesRecord(),
+    ...QuickSuggestTestUtils.geonamesRecords(),
+    ...QuickSuggestTestUtils.geonamesAlternatesRecords(),
   ]);
   await doCacheTest({
     // This value is hardcoded in `YelpSuggestions` as the default.

View File

@@ -28,7 +28,8 @@ add_setup(async () => {
     ],
     remoteSettingsRecords: [
       QuickSuggestTestUtils.weatherRecord(),
-      QuickSuggestTestUtils.geonamesRecord(),
+      ...QuickSuggestTestUtils.geonamesRecords(),
+      ...QuickSuggestTestUtils.geonamesAlternatesRecords(),
     ],
   });

View File

@@ -8,6 +8,10 @@
 :members:
 :exclude-members: SuggestStoreBuilder
 ```
+```{js:autoclass} RustSuggest.sys.AlternateNames
+:members:
+:exclude-members: AlternateNames
+```
 ```{js:autoclass} RustSuggest.sys.FtsMatchInfo
 :members:
 :exclude-members: FtsMatchInfo

@@ -16,6 +20,10 @@
 :members:
 :exclude-members: Geoname
 ```
+```{js:autoclass} RustSuggest.sys.GeonameAlternates
+:members:
+:exclude-members: GeonameAlternates
+```
 ```{js:autoclass} RustSuggest.sys.GeonameMatch
 :members:
 :exclude-members: GeonameMatch

View File

@@ -5372,6 +5372,11 @@ who = "Makoto Kato <m_kato@ga2.so-net.ne.jp>"
 criteria = "safe-to-deploy"
 delta = "0.7.4 -> 0.7.6"

+[[audits.tinyvec_macros]]
+who = "Drew Willcoxon <adw@mozilla.com>"
+criteria = "safe-to-deploy"
+delta = "0.1.0 -> 0.1.1"
+
 [[audits.to_shmem]]
 who = "Emilio Cobos Álvarez <emilio@crisal.io>"
 criteria = "safe-to-deploy"

View File

@@ -696,6 +696,13 @@ user-id = 3618
 user-login = "dtolnay"
 user-name = "David Tolnay"

+[[publisher.unicode-normalization]]
+version = "0.1.24"
+when = "2024-09-17"
+user-id = 1139
+user-login = "Manishearth"
+user-name = "Manish Goregaokar"
+
 [[publisher.unicode-width]]
 version = "0.2.0"
 when = "2024-09-19"

@@ -1400,6 +1407,26 @@ criteria = "safe-to-deploy"
 delta = "3.5.0 -> 3.6.0"
 notes = "Dependency updates and new optimized trait implementations, but otherwise everything looks normal."

+[[audits.bytecode-alliance.audits.tinyvec]]
+who = "Alex Crichton <alex@alexcrichton.com>"
+criteria = "safe-to-deploy"
+version = "1.6.0"
+notes = """
+This crate, while it implements collections, does so without `std::*` APIs and
+without `unsafe`. Skimming the crate everything looks reasonable and what one
+would expect from idiomatic safe collections in Rust.
+"""
+
+[[audits.bytecode-alliance.audits.tinyvec_macros]]
+who = "Alex Crichton <alex@alexcrichton.com>"
+criteria = "safe-to-deploy"
+version = "0.1.0"
+notes = """
+This is a trivial crate which only contains a singular macro definition which is
+intended to multiplex across the internal representation of a tinyvec,
+presumably. This trivially doesn't contain anything bad.
+"""
+
 [[audits.bytecode-alliance.audits.unicase]]
 who = "Alex Crichton <alex@alexcrichton.com>"
 criteria = "safe-to-deploy"

@@ -2032,6 +2059,42 @@ Previously reviewed during security review and the audit is grandparented in.
 """
 aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"

+[[audits.google.audits.tinyvec]]
+who = "Adrian Taylor <adetaylor@chromium.org>"
+criteria = "safe-to-deploy"
+delta = "1.6.0 -> 1.6.1"
+aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
+
+[[audits.google.audits.tinyvec]]
+who = "Adrian Taylor <adetaylor@chromium.org>"
+criteria = "safe-to-deploy"
+delta = "1.6.1 -> 1.7.0"
+aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
+
+[[audits.google.audits.tinyvec]]
+who = "Dustin J. Mitchell <djmitche@chromium.org>"
+criteria = "safe-to-deploy"
+delta = "1.7.0 -> 1.8.0"
+aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
+
+[[audits.google.audits.tinyvec]]
+who = "Adrian Taylor <adetaylor@chromium.org>"
+criteria = "safe-to-deploy"
+delta = "1.8.0 -> 1.8.1"
+aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
+
+[[audits.google.audits.tinyvec]]
+who = "Daniel Cheng <dcheng@chromium.org>"
+criteria = "safe-to-deploy"
+delta = "1.8.1 -> 1.9.0"
+notes = """
+Larger delta, but no unsafe code introduced. Deltas for:
+- borsh (Binary Object Representation Serializer for Hashing) serialization/deserialization support behind the `borsh` feature.
+- trait implementations to interoperate with the generic-array crate
+- miscellaneous helper functions and support code, e.g. `into_vec()`.
+"""
+aggregated-from = "https://chromium.googlesource.com/chromium/src/+/main/third_party/rust/chromium_crates_io/supply-chain/audits.toml?format=TEXT"
+
 [[audits.google.audits.tokio-stream]]
 who = "David Koloski <dkoloski@google.com>"
 criteria = "safe-to-deploy"

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"6a4710d72aec46571c411e76c2c4fd62097e887fc90cc62bf44c454fef618d62","README.md":"478bd3dcd41863df0975c96fd38e34334d8146799bf1a16204bab2cc12dcffd2","src/callback.rs":"13305f4665e62abb4de08d9486295a60afd327b6c839d7ef314312d96dc098dc","src/error.rs":"969ba252410f2adff0ff91ec7eeaf52ffb9e91925eb9db0529f56c09872f78f8","src/lib.rs":"199c109f3a4bab8441ba46e7513602bf732c9b381477867d717dcab24b0983bf","src/mars.rs":"4909fc4f8b397d32144878d6ec64c414b7d724ed2532513f165c768cfe5b983f"},"package":null} {"files":{"Cargo.toml":"c4a478ab37cdb8bea8392dfbe9fbc2e1a534259c7be22b864b8aad86381d8718","README.md":"478bd3dcd41863df0975c96fd38e34334d8146799bf1a16204bab2cc12dcffd2","src/callback.rs":"13305f4665e62abb4de08d9486295a60afd327b6c839d7ef314312d96dc098dc","src/error.rs":"969ba252410f2adff0ff91ec7eeaf52ffb9e91925eb9db0529f56c09872f78f8","src/lib.rs":"8bd9e98e0e194cea4b2fdd0393709113785db63ccdbc0e9518fa55c21f8bb13d","src/mars.rs":"4909fc4f8b397d32144878d6ec64c414b7d724ed2532513f165c768cfe5b983f"},"package":null}

View File

@@ -28,7 +28,6 @@ path = "src/lib.rs"
 [dependencies]
 chrono = "0.4"
-log = "0.4"
 parking_lot = "0.12"
 serde = "1"
 serde_json = "1"

View File

@@ -7,8 +7,7 @@ mod error;
 pub use error::{ApiError, ApiResult, Error, Result};

 use chrono::{DateTime, Duration, Utc};
-use error_support::handle_error;
-use log::error;
+use error_support::{error, handle_error};
 use parking_lot::{Mutex, RwLock};
 use uuid::Uuid;

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"c64762e3ad81f92bf69f4fd17efc4b01b21e75b9bdde8ca94078500b6651c867","README.md":"69ccc6e378995b9d490d64e23b42ea1d7a9e3232e3dae6fabf1f955786a49931","build.rs":"c8d3c38c1208eea36224662b284d8daf3e7ad1b07d22d750524f3da1cc66ccca","src/errorsupport.udl":"8f8e5711913ffd1b515ec60028529768990df51001e6125d4b83c948b41c4466","src/handling.rs":"6e0568b18d426531cb2ae9967c8dd0d51ece5a065f68b15eeb308b995edaa167","src/lib.rs":"1e41747d06a0d032c9601df85dd6e95001e432ae95a75dcca859355cbadef3b0","src/macros.rs":"0d03f82fab20c96a182f941baf3fcf2a286b00fea871ee7fd8e339abc14f9522","src/redact.rs":"c9a4df1a87be68b15d583587bda941d4c60a1d0449e2d43ff99f3611a290a863","src/reporting.rs":"f4af35d5fb5bf0ebef6dc6595edac6351e1dae2bff989c18810480fae2202168","uniffi.toml":"af91bcd8e7b1fa3f475a5e556979ff23c57b338395e0b65abc1cb1a0ee823e23"},"package":null} {"files":{"Cargo.toml":"9ec1b1341ec8901768963a8c3286032d63cb574aa7ea160f0c015451bfaed524","README.md":"d820e387ac36a98c8e554fcaba13b76eb935413b535019444f6d448240e4d07e","src/handling.rs":"4b1183afe0716653918515299bc877a4d461fe8d2bb114a8c25f303203a35fdb","src/lib.rs":"6fcf1ca74e077abddfb48a6472671e35d1a422fc9e80d885da2b4a66bc781983","src/macros.rs":"27366e0424e4d700605c34bd96295cd35fa41aed8b49f30e0b6e0c59b870fe73","src/redact.rs":"c9a4df1a87be68b15d583587bda941d4c60a1d0449e2d43ff99f3611a290a863","src/reporting.rs":"eda5580fabe633bd4fe7ac69ea8056874e8adfc093a74942294edcfbaa48824f","uniffi.toml":"af91bcd8e7b1fa3f475a5e556979ff23c57b338395e0b65abc1cb1a0ee823e23"},"package":null}

View File

@@ -14,7 +14,7 @@ edition = "2021"
 name = "error-support"
 version = "0.1.0"
 authors = ["Thom Chiovoloni <tchiovoloni@mozilla.com>"]
-build = "build.rs"
+build = false
 exclude = ["/android"]
 autolib = false
 autobins = false

@@ -26,27 +26,48 @@ license = "MPL-2.0"
 [features]
 backtrace = ["dep:backtrace"]
+testing = ["tracing-support/testing"]
+tracing-logging = [
+    "dep:tracing",
+    "dep:tracing-support",
+]
+tracing-reporting = [
+    "dep:tracing",
+    "dep:tracing-support",
+]

 [lib]
 name = "error_support"
 path = "src/lib.rs"

-[dependencies]
-log = "0.4"

 [dependencies.backtrace]
 version = "0.3"
 optional = true

+[dependencies.env_logger]
+version = "0.10"
+default-features = false

 [dependencies.error-support-macros]
 path = "macros"

 [dependencies.lazy_static]
 version = "1.4"

+[dependencies.log]
+version = "0.4"

 [dependencies.parking_lot]
 version = ">=0.11,<=0.12"

+[dependencies.tracing]
+version = "0.1"
+optional = true

+[dependencies.tracing-support]
+path = "../tracing"
+optional = true

 [dependencies.uniffi]
 version = "0.29.0"

View File

@@ -86,3 +86,14 @@ a user's database in their errors, which would then appear in our error
 variants. However, we've never seen that in practice so we are comfortable
 including the `rusqlite` error message in our error reports, without attempting
 to sanitize them.
+
+# Logging support
+
+This crate also supplies macros for logging, like `trace!()`, `error!()` etc,
+which act just like the same macros from the `log` and `tracing` crates. Using
+these macros means you don't need to take additional dependencies, nor know exactly
+what logging system is in-place - it will arrange to be the correct logging system
+for the application the crate is being used in.
+
+Avoiding PII in logging is up to the crate doing the logging. In general, components
+might log PII (eg, URLs etc) at trace level.

View File

@@ -1,8 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/.
- */
-
-fn main() {
-    uniffi::generate_scaffolding("./src/errorsupport.udl").unwrap();
-}

View File

@@ -1,16 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-namespace errorsupport {
-    /// Set the global error reporter. This is typically done early in startup.
-    void set_application_error_reporter(ApplicationErrorReporter error_reporter);
-    /// Unset the global error reporter. This is typically done at shutdown for
-    /// platforms that want to cleanup references like Desktop.
-    void unset_application_error_reporter();
-};
-
-callback interface ApplicationErrorReporter {
-    void report_error(string type_name, string message);
-    void report_breadcrumb(string message, string module, u32 line, u32 column);
-};

View File

@@ -8,7 +8,7 @@
 #[derive(Debug, Default)]
 pub struct ErrorReporting {
     /// If Some(level), will write a log message at that level.
-    log_level: Option<log::Level>,
+    log_level: Option<crate::Level>,
     /// If Some(report_class) will call the error reporter with details.
     report_class: Option<String>,
 }

@@ -35,7 +35,7 @@ impl<E> ErrorHandling<E> {
     }

     /// Add logging to an ErrorHandling instance
-    pub fn log(self, level: log::Level) -> Self {
+    pub fn log(self, level: crate::Level) -> Self {
         Self {
             err: self.err,
             reporting: ErrorReporting {

@@ -60,12 +60,12 @@ impl<E> ErrorHandling<E> {
     /// log a warning
     pub fn log_warning(self) -> Self {
-        self.log(log::Level::Warn)
+        self.log(crate::Level::Warn)
     }

     /// log an info
     pub fn log_info(self) -> Self {
-        self.log(log::Level::Info)
+        self.log(crate::Level::Info)
     }

     /// Add reporting to an ErrorHandling instance and also log an Error

@@ -73,7 +73,7 @@ impl<E> ErrorHandling<E> {
         Self {
             err: self.err,
             reporting: ErrorReporting {
-                log_level: Some(log::Level::Error),
+                log_level: Some(crate::Level::Error),
                 report_class: Some(report_class.into()),
             },
         }

@@ -99,9 +99,17 @@ where
     let handling = e.get_error_handling();
     let reporting = handling.reporting;
     if let Some(level) = reporting.log_level {
-        match &reporting.report_class {
-            Some(report_class) => log::log!(level, "{report_class}: {}", e.to_string()),
-            None => log::log!(level, "{}", e.to_string()),
+        // tracing dosn't seem to have anything close enough to `log::log`, so we have to match levels explicitly.
+        let message = match &reporting.report_class {
+            Some(report_class) => format!("{report_class}: {}", e),
+            None => format!("{}", e),
+        };
+        match level {
+            crate::Level::Trace => crate::trace!("{}", message),
+            crate::Level::Debug => crate::debug!("{}", message),
+            crate::Level::Info => crate::info!("{}", message),
+            crate::Level::Warn => crate::warn!("{}", message),
+            crate::Level::Error => crate::error!("{}", message),
         }
     }
     if let Some(report_class) = reporting.report_class {

View File

@@ -2,6 +2,34 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

+// kinda abusing features here, but features "override" builtin support.
+#[cfg(not(feature = "tracing-logging"))]
+pub use log::{debug, error, info, trace, warn, Level};
+#[cfg(feature = "tracing-logging")]
+pub use tracing_support::{debug, error, info, trace, warn, Level};
+
+#[cfg(all(feature = "testing", not(feature = "tracing-logging")))]
+pub fn init_for_tests() {
+    let _ = env_logger::try_init();
+}
+
+#[cfg(all(feature = "testing", not(feature = "tracing-logging")))]
+pub fn init_for_tests_with_level(level: Level) {
+    // There's gotta be a better way :(
+    let level_name = match level {
+        Level::Debug => "debug",
+        Level::Trace => "trace",
+        Level::Info => "info",
+        Level::Warn => "warn",
+        Level::Error => "error",
+    };
+    env_logger::Builder::from_env(env_logger::Env::default().default_filter_or(level_name)).init();
+}
+
+#[cfg(all(feature = "testing", feature = "tracing-logging"))]
+pub use tracing_support::init_for_tests;
+
 mod macros;

 #[cfg(feature = "backtrace")]

@@ -39,13 +67,25 @@ pub mod backtrace {
 mod redact;
 pub use redact::*;

+#[cfg(not(feature = "tracing-reporting"))]
 mod reporting;
+#[cfg(not(feature = "tracing-reporting"))]
 pub use reporting::{
-    report_breadcrumb, report_error_to_app, set_application_error_reporter,
-    unset_application_error_reporter, ApplicationErrorReporter,
+    set_application_error_reporter, unset_application_error_reporter, ApplicationErrorReporter,
 };
-// These are exposed specifically for tests
-pub use reporting::{ArcReporterAdapter, TestErrorReporter};
+
+#[cfg(feature = "tracing-reporting")]
+mod reporting {
+    pub fn report_error_to_app(type_name: String, message: String) {
+        tracing::event!(target: "app-services-error-reporter::error", tracing::Level::ERROR, message, type_name);
+    }
+
+    pub fn report_breadcrumb(message: String, module: String, line: u32, column: u32) {
+        tracing::event!(target: "app-services-error-reporter::breadcrumb", tracing::Level::INFO, message, module, line, column);
+    }
+}
+pub use reporting::{report_breadcrumb, report_error_to_app};

 pub use error_support_macros::handle_error;

@@ -173,4 +213,4 @@ macro_rules! define_error {
     };
 }

-uniffi::include_scaffolding!("errorsupport");
+uniffi::setup_scaffolding!("errorsupport");

View File

@@ -20,7 +20,7 @@
 macro_rules! report_error {
     ($type_name:expr, $($arg:tt)*) => {
         let message = std::format!($($arg)*);
-        ::log::warn!("report {}: {}", $type_name, message);
+        $crate::warn!("report {}: {}", $type_name, message);
         $crate::report_error_to_app($type_name.to_string(), message.to_string());
     };
 }

@@ -50,7 +50,7 @@ macro_rules! breadcrumb {
     ($($arg:tt)*) => {
         {
             let message = std::format!($($arg)*);
-            ::log::info!("breadcrumb: {}", message);
+            $crate::info!("breadcrumb: {}", message);
             $crate::report_breadcrumb(
                 message,
                 std::module_path!().to_string(),

View File

@@ -4,7 +4,6 @@
use parking_lot::RwLock; use parking_lot::RwLock;
use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, Mutex};
/// Counter for breadcrumb messages /// Counter for breadcrumb messages
/// ///
@@ -29,6 +28,7 @@ fn get_breadcrumb_counter_value() -> u32 {
/// The application that's consuming application-services implements this via a UniFFI callback /// The application that's consuming application-services implements this via a UniFFI callback
/// interface, then calls `set_application_error_reporter()` to setup a global /// interface, then calls `set_application_error_reporter()` to setup a global
/// ApplicationErrorReporter. /// ApplicationErrorReporter.
#[uniffi::export(callback_interface)]
pub trait ApplicationErrorReporter: Sync + Send { pub trait ApplicationErrorReporter: Sync + Send {
/// Send an error report to a Sentry-like error reporting system /// Send an error report to a Sentry-like error reporting system
/// ///
@@ -50,10 +50,15 @@ lazy_static::lazy_static! {
pub(crate) static ref APPLICATION_ERROR_REPORTER: RwLock<Box<dyn ApplicationErrorReporter>> = RwLock::new(Box::new(DefaultApplicationErrorReporter)); pub(crate) static ref APPLICATION_ERROR_REPORTER: RwLock<Box<dyn ApplicationErrorReporter>> = RwLock::new(Box::new(DefaultApplicationErrorReporter));
} }
pub fn set_application_error_reporter(reporter: Box<dyn ApplicationErrorReporter>) { /// Set the global error reporter. This is typically done early in startup.
*APPLICATION_ERROR_REPORTER.write() = reporter; #[uniffi::export]
pub fn set_application_error_reporter(error_reporter: Box<dyn ApplicationErrorReporter>) {
*APPLICATION_ERROR_REPORTER.write() = error_reporter;
} }
/// Unset the global error reporter. This is typically done at shutdown for
/// platforms that want to cleanup references like Desktop.
#[uniffi::export]
pub fn unset_application_error_reporter() { pub fn unset_application_error_reporter() {
*APPLICATION_ERROR_REPORTER.write() = Box::new(DefaultApplicationErrorReporter) *APPLICATION_ERROR_REPORTER.write() = Box::new(DefaultApplicationErrorReporter)
} }
@@ -70,59 +75,3 @@ pub fn report_breadcrumb(message: String, module: String, line: u32, column: u32
.read() .read()
.report_breadcrumb(message, module, line, column); .report_breadcrumb(message, module, line, column);
} }
// Test error reporter that captures reported errors
// You should use this when you want to validate that `report_error` actually reports what you
// epect and
#[derive(Default)]
pub struct TestErrorReporter {
errors: Mutex<Vec<(String, String)>>,
}
impl TestErrorReporter {
pub fn new() -> Self {
Self {
errors: Mutex::new(Vec::new()),
}
}
pub fn get_errors(&self) -> Vec<(String, String)> {
self.errors.lock().unwrap().clone()
}
}
impl ApplicationErrorReporter for TestErrorReporter {
fn report_error(&self, type_name: String, message: String) {
if let Ok(mut errors) = self.errors.lock() {
errors.push((type_name, message));
}
}
fn report_breadcrumb(&self, _message: String, _module: String, _line: u32, _column: u32) {}
}
/// An adapter that implements `ApplicationErrorReporter` and
/// delegates all calls to an `Arc<TestErrorReporter>`.
///
/// Because `set_application_error_reporter` requires a
/// `Box<dyn ApplicationErrorReporter>`, we can't directly pass
/// an `Arc<TestErrorReporter>`; this adapter solves the mismatch.
pub struct ArcReporterAdapter {
inner: Arc<TestErrorReporter>,
}
impl ArcReporterAdapter {
pub fn new(inner: Arc<TestErrorReporter>) -> Self {
Self { inner }
}
}
impl ApplicationErrorReporter for ArcReporterAdapter {
fn report_error(&self, type_name: String, message: String) {
self.inner.report_error(type_name, message)
}
fn report_breadcrumb(&self, message: String, module: String, line: u32, column: u32) {
self.inner.report_breadcrumb(message, module, line, column)
}
}
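Taken together, a sketch of how the new test helpers compose (the test body and type-name string are illustrative):

#[test]
fn error_is_reported() {
    let reporter = std::sync::Arc::new(error_support::TestErrorReporter::new());
    error_support::set_application_error_reporter(Box::new(
        error_support::ArcReporterAdapter::new(std::sync::Arc::clone(&reporter)),
    ));

    error_support::report_error!("demo-error", "boom: {}", 42);

    assert_eq!(
        reporter.get_errors(),
        vec![("demo-error".to_string(), "boom: 42".to_string())]
    );
    error_support::unset_application_error_reporter();
}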

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"fa569d53ba068f1626419e3910775ad6b5fce69f17a41a342640cba42a57f291","src/bin/generate-test-data.rs":"7cc80b56929091d02675b9dd9bf4c657a95cda502656cf2ec8d91f56d7a393c7","src/db.rs":"d9dd44501ee3b19c696d8830d3036f7bfe0e8ad7751d5a057f5d8295ebf0bd4f","src/error.rs":"3a1308e65440769d9435fc95528d4ef42994c84d88e1da04ba058491dea387c4","src/ingest.rs":"04ae6d4c65a46fa96364fc39018502de172a4393bf42bd3ede2865313e7977e5","src/interest.rs":"e4369a1280867438bca12746f71288a03b4d5e180e156f4bc0335046012565f7","src/lib.rs":"8f7408a4bebb882441b697cc16356a8d02342f57ebada515e289ce813e9f5e24","src/ranker.rs":"e71414fe79ade26f3c79dceb5211af4f37984a9cded8c938dc1da8d8d28c2ad3","src/rs.rs":"e897443bd4d8d308effeae55a955d237d22e823b5d5abfb271a25b5796ebd419","src/schema.rs":"38ea82679da2729a571aad936f96469e732ec1c104d7c21fd869842f7a5f30a3","src/url_hash.rs":"2e908316fb70923644d1990dbf470d69ce2f5e99b0c5c3d95ec691590be8ffa5","test-data":"1ef2cd092d59e7e126cd4a514af983d449ed9f9c98708702fd237464a76c2b5e"},"package":null} {"files":{"Cargo.toml":"f75eb2eab3ddc2e4a687f2d6d42bbf15d465f421118e0082a8c7440c285c7d9a","src/bin/generate-test-data.rs":"7cc80b56929091d02675b9dd9bf4c657a95cda502656cf2ec8d91f56d7a393c7","src/db.rs":"d9dd44501ee3b19c696d8830d3036f7bfe0e8ad7751d5a057f5d8295ebf0bd4f","src/error.rs":"3a1308e65440769d9435fc95528d4ef42994c84d88e1da04ba058491dea387c4","src/ingest.rs":"194eefe22aa012d863675005548041495e71a0adc95f5e59d1124b8990de156c","src/interest.rs":"e4369a1280867438bca12746f71288a03b4d5e180e156f4bc0335046012565f7","src/lib.rs":"6caa17ba27774941fce73602357acaf279645a19cd70459636e507b247da255f","src/ranker.rs":"e71414fe79ade26f3c79dceb5211af4f37984a9cded8c938dc1da8d8d28c2ad3","src/rs.rs":"e897443bd4d8d308effeae55a955d237d22e823b5d5abfb271a25b5796ebd419","src/schema.rs":"38ea82679da2729a571aad936f96469e732ec1c104d7c21fd869842f7a5f30a3","src/url_hash.rs":"2e908316fb70923644d1990dbf470d69ce2f5e99b0c5c3d95ec691590be8ffa5","test-data":"1ef2cd092d59e7e126cd4a514af983d449ed9f9c98708702fd237464a76c2b5e"},"package":null}

View File

@@ -37,7 +37,6 @@ path = "src/bin/generate-test-data.rs"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
base64 = "0.21.2" base64 = "0.21.2"
log = "0.4"
md-5 = "0.10" md-5 = "0.10"
parking_lot = ">=0.11,<=0.12" parking_lot = ">=0.11,<=0.12"
rand = "0.8" rand = "0.8"

View File

@@ -28,7 +28,7 @@ pub fn ensure_interest_data_populated<C: RelevancyRemoteSettingsClient>(
db.read_write(move |dao| insert_interest_data(data, dao))?; db.read_write(move |dao| insert_interest_data(data, dao))?;
} }
Err(e) => { Err(e) => {
log::warn!("error fetching interest data: {e}"); crate::warn!("error fetching interest data: {e}");
return Err(Error::FetchInterestDataError); return Err(Error::FetchInterestDataError);
} }
} }

View File

@@ -27,6 +27,9 @@ use remote_settings::{RemoteSettingsClient, RemoteSettingsService};
pub use db::RelevancyDb; pub use db::RelevancyDb;
pub use error::{ApiResult, Error, RelevancyApiError, Result}; pub use error::{ApiResult, Error, RelevancyApiError, Result};
// reexport logging helpers.
pub use error_support::{debug, error, info, trace, warn};
pub use interest::{Interest, InterestVector}; pub use interest::{Interest, InterestVector};
pub use ranker::score; pub use ranker::score;
@@ -195,7 +198,7 @@ impl<C: rs::RelevancyRemoteSettingsClient> RelevancyStoreInner<C> {
let mut interest_vector = InterestVector::default(); let mut interest_vector = InterestVector::default();
for url in top_urls_by_frecency { for url in top_urls_by_frecency {
let interest_count = self.db.read(|dao| dao.get_url_interest_vector(&url))?; let interest_count = self.db.read(|dao| dao.get_url_interest_vector(&url))?;
log::trace!("classified: {url} {}", interest_count.summary()); crate::trace!("classified: {url} {}", interest_count.summary());
interest_vector = interest_vector + interest_count; interest_vector = interest_vector + interest_count;
} }
Ok(interest_vector) Ok(interest_vector)
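The `crate::warn!` and `crate::trace!` call sites above resolve through the re-export added earlier in this file; a minimal sketch of the pattern, with the module layout illustrative:

// lib.rs: re-export error-support's logging macros at the crate root.
pub use error_support::{debug, error, info, trace, warn};

// any other module: resolve through the crate root instead of `log`.
pub fn classify(url: &str) {
    crate::trace!("classified: {url}");
}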

File diff suppressed because one or more lines are too long

View File

@@ -45,7 +45,6 @@ path = "src/lib.rs"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
camino = "1.0" camino = "1.0"
log = "0.4"
parking_lot = "0.12" parking_lot = "0.12"
regex = "1.9" regex = "1.9"
serde_json = "1" serde_json = "1"

View File

@@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::config::{BaseUrl, RemoteSettingsConfig}; use crate::config::{BaseUrl, RemoteSettingsConfig};
use crate::error::{Error, Result}; use crate::error::{debug, trace, Error, Result};
use crate::jexl_filter::JexlFilter; use crate::jexl_filter::JexlFilter;
#[cfg(feature = "signatures")] #[cfg(feature = "signatures")]
use crate::signatures; use crate::signatures;
@@ -309,7 +309,7 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
let collection_url = inner.api_client.collection_url(); let collection_url = inner.api_client.collection_url();
let timestamp = inner.storage.get_last_modified_timestamp(&collection_url)?; let timestamp = inner.storage.get_last_modified_timestamp(&collection_url)?;
let changeset = inner.api_client.fetch_changeset(timestamp)?; let changeset = inner.api_client.fetch_changeset(timestamp)?;
log::debug!( debug!(
"{0}: apply {1} change(s) locally.", "{0}: apply {1} change(s) locally.",
self.collection_name, self.collection_name,
changeset.changes.len() changeset.changes.len()
@@ -327,7 +327,7 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
self.perform_sync_operation()?; self.perform_sync_operation()?;
// Verify that inserted data has valid signature // Verify that inserted data has valid signature
if self.verify_signature().is_err() { if self.verify_signature().is_err() {
log::debug!( debug!(
"{0}: signature verification failed. Reset and retry.", "{0}: signature verification failed. Reset and retry.",
self.collection_name self.collection_name
); );
@@ -341,12 +341,12 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
.expect("Failed to reset storage after verification failure"); .expect("Failed to reset storage after verification failure");
})?; })?;
} }
log::trace!("{0}: sync done.", self.collection_name); trace!("{0}: sync done.", self.collection_name);
Ok(()) Ok(())
} }
fn reset_storage(&self) -> Result<()> { fn reset_storage(&self) -> Result<()> {
log::trace!("{0}: reset local storage.", self.collection_name); trace!("{0}: reset local storage.", self.collection_name);
let mut inner = self.inner.lock(); let mut inner = self.inner.lock();
let collection_url = inner.api_client.collection_url(); let collection_url = inner.api_client.collection_url();
// Clear existing storage // Clear existing storage
@@ -354,7 +354,7 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
// Load packaged data only for production // Load packaged data only for production
if inner.api_client.is_prod_server()? { if inner.api_client.is_prod_server()? {
if let Some(packaged_data) = self.load_packaged_data() { if let Some(packaged_data) = self.load_packaged_data() {
log::trace!("{0}: restore packaged dump.", self.collection_name); trace!("{0}: restore packaged dump.", self.collection_name);
inner.storage.insert_collection_content( inner.storage.insert_collection_content(
&collection_url, &collection_url,
&packaged_data.data, &packaged_data.data,
@@ -372,7 +372,7 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
#[cfg(not(feature = "signatures"))] #[cfg(not(feature = "signatures"))]
fn verify_signature(&self) -> Result<()> { fn verify_signature(&self) -> Result<()> {
log::debug!("{0}: signature verification skipped.", self.collection_name); debug!("{0}: signature verification skipped.", self.collection_name);
Ok(()) Ok(())
} }
@@ -415,15 +415,12 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
&expected_leaf_cname, &expected_leaf_cname,
) )
.inspect_err(|err| { .inspect_err(|err| {
log::debug!( debug!(
"{0}: bad signature ({1:?}) using certificate {2} and signer '{3}'", "{0}: bad signature ({1:?}) using certificate {2} and signer '{3}'",
self.collection_name, self.collection_name, err, &metadata.signature.x5u, expected_leaf_cname
err,
&metadata.signature.x5u,
expected_leaf_cname
); );
})?; })?;
log::trace!("{0}: signature verification success.", self.collection_name); trace!("{0}: signature verification success.", self.collection_name);
Ok(()) Ok(())
} }
_ => { _ => {
@@ -565,7 +562,7 @@ impl ViaductApiClient {
} }
fn make_request(&mut self, url: Url) -> Result<Response> { fn make_request(&mut self, url: Url) -> Result<Response> {
log::trace!("make_request: {url}"); trace!("make_request: {url}");
self.remote_state.ensure_no_backoff()?; self.remote_state.ensure_no_backoff()?;
let req = Request::get(url); let req = Request::get(url);

View File

@@ -10,6 +10,7 @@
use url::Url; use url::Url;
use crate::error::warn;
use crate::{ApiResult, Error, RemoteSettingsContext, Result}; use crate::{ApiResult, Error, RemoteSettingsContext, Result};
/// Remote settings configuration /// Remote settings configuration
@@ -86,7 +87,7 @@ impl RemoteSettingsServer {
Ok(url) => url, Ok(url) => url,
// The unwrap below will never fail, since prod is a hard-coded/valid URL. // The unwrap below will never fail, since prod is a hard-coded/valid URL.
Err(_) => { Err(_) => {
log::warn!("Invalid Custom URL: {}", self.raw_url()); warn!("Invalid Custom URL: {}", self.raw_url());
BaseUrl::parse(Self::Prod.raw_url()).unwrap() BaseUrl::parse(Self::Prod.raw_url()).unwrap()
} }
} }

View File

@@ -3,6 +3,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use error_support::{ErrorHandling, GetErrorHandling}; use error_support::{ErrorHandling, GetErrorHandling};
// reexport logging helpers.
pub use error_support::{debug, error, info, trace, warn};
pub type ApiResult<T> = std::result::Result<T, RemoteSettingsError>; pub type ApiResult<T> = std::result::Result<T, RemoteSettingsError>;
pub type Result<T> = std::result::Result<T, Error>; pub type Result<T> = std::result::Result<T, Error>;

View File

@@ -23,7 +23,7 @@ mod macros;
pub use client::{Attachment, RemoteSettingsRecord, RemoteSettingsResponse, RsJsonObject}; pub use client::{Attachment, RemoteSettingsRecord, RemoteSettingsResponse, RsJsonObject};
pub use config::{BaseUrl, RemoteSettingsConfig, RemoteSettingsConfig2, RemoteSettingsServer}; pub use config::{BaseUrl, RemoteSettingsConfig, RemoteSettingsConfig2, RemoteSettingsServer};
pub use context::RemoteSettingsContext; pub use context::RemoteSettingsContext;
pub use error::{ApiResult, RemoteSettingsError, Result}; pub use error::{trace, ApiResult, RemoteSettingsError, Result};
use client::Client; use client::Client;
use error::Error; use error::Error;
@@ -124,7 +124,7 @@ impl RemoteSettingsClient {
Ok(records) => records, Ok(records) => records,
Err(e) => { Err(e) => {
// Log/report the error // Log/report the error
log::trace!("get_records error: {e}"); trace!("get_records error: {e}");
convert_log_report_error(e); convert_log_report_error(e);
// Throw away the converted result and return None, there's nothing a client can // Throw away the converted result and return None, there's nothing a client can
// really do with an error except treat it as the None case // really do with an error except treat it as the None case

View File

@@ -8,6 +8,7 @@ use std::{
}; };
use camino::Utf8PathBuf; use camino::Utf8PathBuf;
use error_support::trace;
use parking_lot::Mutex; use parking_lot::Mutex;
use serde::Deserialize; use serde::Deserialize;
use viaduct::Request; use viaduct::Request;
@@ -101,13 +102,13 @@ impl RemoteSettingsService {
if let Some(server_last_modified) = change_map.get(&(collection_name, &bucket_name)) if let Some(server_last_modified) = change_map.get(&(collection_name, &bucket_name))
{ {
if client_last_modified != *server_last_modified { if client_last_modified != *server_last_modified {
log::trace!("skipping up-to-date collection: {collection_name}"); trace!("skipping up-to-date collection: {collection_name}");
continue; continue;
} }
} }
} }
if synced_collections.insert(collection_name.to_string()) { if synced_collections.insert(collection_name.to_string()) {
log::trace!("syncing collection: {collection_name}"); trace!("syncing collection: {collection_name}");
client.sync()?; client.sync()?;
} }
} }
@@ -171,7 +172,7 @@ impl RemoteSettingsServiceInner {
// notification. // notification.
url.query_pairs_mut().append_pair("_expected", "0"); url.query_pairs_mut().append_pair("_expected", "0");
let url = url.into_inner(); let url = url.into_inner();
log::trace!("make_request: {url}"); trace!("make_request: {url}");
self.remote_state.ensure_no_backoff()?; self.remote_state.ensure_no_backoff()?;
let req = Request::get(url); let req = Request::get(url);

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"ac0ac2103375f1c3906436b53627f88515da74864dd3f86ebb2a18952ba72b30","README.md":"d59a6ad6232a86a7bd3632ca62c44ba8bd466615c5d47ce0d836b270bac5562c","android/build.gradle":"e3b617d653aa0221f2229bb16c2fd635003fe82d0274c4b9a6f2d8154851985a","android/proguard-rules.pro":"1cf8c57e8f79c250b0af9c1a5a4edad71a5c348a79ab70243b6bae086c150ad2","android/src/main/AndroidManifest.xml":"0a05039a6124be0296764c2b0f41e863b5538d75e6164dd9ae945b59d983c318","src/configuration_overrides_types.rs":"220a5e12ee3deb309a1571c5820ec5132c959f56667c4c48f997bbe2be0c7eeb","src/configuration_types.rs":"a495199fc19cf9ce1aefe41058a38a0ffad4eb6f719fac11c11dcc3cfe4f234a","src/environment_matching.rs":"5a1ade9a900942c62e8740597528a34df6fb3fdb72c801a647a3386acd42fcc8","src/error.rs":"d3c1eda7a8da15446a321139d4d29dd9ceee99e916519690d5eb2d45ed628598","src/filter.rs":"88a39e594397708db888726cac00ad1e5b4a892c5a121c96cc11d20f41851b45","src/lib.rs":"9c83780a74048fbbc7bbba5706067b9dc5db2ae25a0cc751687d2738903723b4","src/selector.rs":"6585b4d487179353f4dc8bae396cc18a97e5fcde58f6afa09373cd04510daa03","src/sort_helpers.rs":"5bcae57c230e1d1123d3c5be1ae38b481c6a1fc5096bc0fdede6f7a7c8d27032","src/types.rs":"8721ccf9443b28435ba211d5705f4a6c82eb0354ba1d100045438800c7e2bf9a","uniffi.toml":"96f1cd569483ff59e3c73852f085a03889fa24a2ce20ff7a3003799a9f48a51e"},"package":null} {"files":{"Cargo.toml":"4a04033dec6fd9bb7e2232ee6a47a16a7a07ccf14f0a8870b6bea9a131d5697e","README.md":"d59a6ad6232a86a7bd3632ca62c44ba8bd466615c5d47ce0d836b270bac5562c","android/build.gradle":"e3b617d653aa0221f2229bb16c2fd635003fe82d0274c4b9a6f2d8154851985a","android/proguard-rules.pro":"1cf8c57e8f79c250b0af9c1a5a4edad71a5c348a79ab70243b6bae086c150ad2","android/src/main/AndroidManifest.xml":"0a05039a6124be0296764c2b0f41e863b5538d75e6164dd9ae945b59d983c318","src/configuration_overrides_types.rs":"220a5e12ee3deb309a1571c5820ec5132c959f56667c4c48f997bbe2be0c7eeb","src/configuration_types.rs":"a495199fc19cf9ce1aefe41058a38a0ffad4eb6f719fac11c11dcc3cfe4f234a","src/environment_matching.rs":"5a1ade9a900942c62e8740597528a34df6fb3fdb72c801a647a3386acd42fcc8","src/error.rs":"d3c1eda7a8da15446a321139d4d29dd9ceee99e916519690d5eb2d45ed628598","src/filter.rs":"88a39e594397708db888726cac00ad1e5b4a892c5a121c96cc11d20f41851b45","src/lib.rs":"9c83780a74048fbbc7bbba5706067b9dc5db2ae25a0cc751687d2738903723b4","src/selector.rs":"7aa3d7395502357df87fd5f8908a880d1e50d0dd02a00ac7876bdd42cd255498","src/sort_helpers.rs":"5bcae57c230e1d1123d3c5be1ae38b481c6a1fc5096bc0fdede6f7a7c8d27032","src/types.rs":"8721ccf9443b28435ba211d5705f4a6c82eb0354ba1d100045438800c7e2bf9a","uniffi.toml":"96f1cd569483ff59e3c73852f085a03889fa24a2ce20ff7a3003799a9f48a51e"},"package":null}

View File

@@ -24,9 +24,6 @@ description = "Library for search service related components."
readme = "README.md" readme = "README.md"
license = "MPL-2.0" license = "MPL-2.0"
[features]
enable_env_logger = ["env_logger"]
[lib] [lib]
name = "search" name = "search"
path = "src/lib.rs" path = "src/lib.rs"
@@ -36,11 +33,6 @@ parking_lot = ">=0.11,<=0.12"
serde_json = "1" serde_json = "1"
thiserror = "1" thiserror = "1"
[dependencies.env_logger]
version = "0.10.0"
optional = true
default-features = false
[dependencies.error-support] [dependencies.error-support]
path = "../support/error" path = "../support/error"
@@ -62,10 +54,9 @@ mockito = "0.31"
once_cell = "1.18.0" once_cell = "1.18.0"
pretty_assertions = "0.6" pretty_assertions = "0.6"
[dev-dependencies.env_logger] [dev-dependencies.error-support]
version = "0.10.0" path = "../support/error"
features = ["humantime"] features = ["testing"]
default-features = false
[dev-dependencies.viaduct-reqwest] [dev-dependencies.viaduct-reqwest]
path = "../support/viaduct-reqwest" path = "../support/viaduct-reqwest"

View File

@@ -160,7 +160,6 @@ impl SearchEngineSelector {
mod tests { mod tests {
use super::*; use super::*;
use crate::{types::*, SearchApiError}; use crate::{types::*, SearchApiError};
use env_logger;
use mockito::mock; use mockito::mock;
use pretty_assertions::assert_eq; use pretty_assertions::assert_eq;
use remote_settings::{RemoteSettingsConfig2, RemoteSettingsContext, RemoteSettingsServer}; use remote_settings::{RemoteSettingsConfig2, RemoteSettingsContext, RemoteSettingsServer};
@@ -1890,7 +1889,7 @@ mod tests {
should_apply_overrides: bool, should_apply_overrides: bool,
expect_sync_successful: bool, expect_sync_successful: bool,
) -> Arc<SearchEngineSelector> { ) -> Arc<SearchEngineSelector> {
let _ = env_logger::builder().try_init(); error_support::init_for_tests();
viaduct_reqwest::use_reqwest_backend(); viaduct_reqwest::use_reqwest_backend();
let config = RemoteSettingsConfig2 { let config = RemoteSettingsConfig2 {

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"00ffcb582afeb58d8249fc5fa2c453dab9ce3e305cb49410e21ca9a8561ce16e","src/conn_ext.rs":"1280fb1f06b74ed312e73f34c4fd86f538411c4b3d4eeccb631c80d02e295645","src/debug_tools.rs":"bece2bc3d35379b81ea2f942a0a3e909e0ab0553656505904745548eacaf402a","src/each_chunk.rs":"0b4de829ccaf06b743d0ee5bce766399d841e12592cd00d22605b75a5ae6dbd0","src/lazy.rs":"a96b4f4ec572538b49cdfa8fee981dcf5143a5f51163fb8a573d3ac128df70f9","src/lib.rs":"cb501b3b0482d549cbe6f0350d7321ed315269ccd75215af2582aae340fe354b","src/maybe_cached.rs":"0b18425595055883a98807fbd62ff27a79c18af34e7cb3439f8c3438463ef2dd","src/open_database.rs":"0e50c02b3a052c6b3cdc742409d46fb40a5939080c1f7ec1684241dc2b02f269","src/repeat.rs":"3dad3cbc6f47fc7598fc7b0fbf79b9c915322396d1f64d3d09651d100d428351"},"package":null} {"files":{"Cargo.toml":"f6d6946d71afceda2e19155bd61d5563440f81d67eca67857834ea64aeaf1cc4","src/conn_ext.rs":"8efb8c9fd49f9d7444ef5e7ce426861658cecaf884a50c5e7673bbe92941558f","src/debug_tools.rs":"bece2bc3d35379b81ea2f942a0a3e909e0ab0553656505904745548eacaf402a","src/each_chunk.rs":"0b4de829ccaf06b743d0ee5bce766399d841e12592cd00d22605b75a5ae6dbd0","src/lazy.rs":"a96b4f4ec572538b49cdfa8fee981dcf5143a5f51163fb8a573d3ac128df70f9","src/lib.rs":"c3a83ab608724be7da83ca17a08b11ab88e4d5ce632509fd8a498a6e9fb969e8","src/maybe_cached.rs":"0b18425595055883a98807fbd62ff27a79c18af34e7cb3439f8c3438463ef2dd","src/open_database.rs":"c2ca3bf49dba5b8ae7e2be0fffebd5e440eacbf0748d93572c151259c66b807c","src/repeat.rs":"3dad3cbc6f47fc7598fc7b0fbf79b9c915322396d1f64d3d09651d100d428351"},"package":null}

View File

@@ -33,11 +33,13 @@ path = "src/lib.rs"
[dependencies] [dependencies]
lazy_static = "1.4" lazy_static = "1.4"
log = "0.4"
parking_lot = ">=0.11,<=0.12" parking_lot = ">=0.11,<=0.12"
tempfile = "3.1.0" tempfile = "3.1.0"
thiserror = "1.0" thiserror = "1.0"
[dependencies.error-support]
path = "../error"
[dependencies.interrupt-support] [dependencies.interrupt-support]
path = "../interrupt" path = "../interrupt"
@@ -50,6 +52,6 @@ features = [
"unlock_notify", "unlock_notify",
] ]
[dev-dependencies.env_logger] [dev-dependencies.error-support]
version = "0.10" path = "../error"
default-features = false features = ["testing"]

View File

@@ -12,6 +12,7 @@ use std::ops::Deref;
use std::time::Instant; use std::time::Instant;
use crate::maybe_cached::MaybeCached; use crate::maybe_cached::MaybeCached;
use crate::{debug, warn};
/// This trait exists so that we can use these helpers on `rusqlite::{Transaction, Connection}`. /// This trait exists so that we can use these helpers on `rusqlite::{Transaction, Connection}`.
/// Note that you must import ConnExt in order to call these methods on anything. /// Note that you must import ConnExt in order to call these methods on anything.
@@ -332,19 +333,19 @@ impl<'conn> UncheckedTransaction<'conn> {
/// Consumes and commits an unchecked transaction. /// Consumes and commits an unchecked transaction.
pub fn commit(mut self) -> SqlResult<()> { pub fn commit(mut self) -> SqlResult<()> {
if self.finished { if self.finished {
log::warn!("ignoring request to commit an already finished transaction"); warn!("ignoring request to commit an already finished transaction");
return Ok(()); return Ok(());
} }
self.finished = true; self.finished = true;
self.conn.execute_batch("COMMIT")?; self.conn.execute_batch("COMMIT")?;
log::debug!("Transaction commited after {:?}", self.started_at.elapsed()); debug!("Transaction commited after {:?}", self.started_at.elapsed());
Ok(()) Ok(())
} }
/// Consumes and rolls back an unchecked transaction. /// Consumes and rolls back an unchecked transaction.
pub fn rollback(mut self) -> SqlResult<()> { pub fn rollback(mut self) -> SqlResult<()> {
if self.finished { if self.finished {
log::warn!("ignoring request to rollback an already finished transaction"); warn!("ignoring request to rollback an already finished transaction");
return Ok(()); return Ok(());
} }
self.rollback_() self.rollback_()
@@ -377,7 +378,7 @@ impl Deref for UncheckedTransaction<'_> {
impl Drop for UncheckedTransaction<'_> { impl Drop for UncheckedTransaction<'_> {
fn drop(&mut self) { fn drop(&mut self) {
if let Err(e) = self.finish_() { if let Err(e) = self.finish_() {
log::warn!("Error dropping an unchecked transaction: {}", e); warn!("Error dropping an unchecked transaction: {}", e);
} }
} }
} }
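The contract being logged here, sketched against sql-support's `ConnExt::unchecked_transaction()` helper (the `counters` table is illustrative): a transaction that goes out of scope unfinished is rolled back in `drop`, with failures only warned about.

use rusqlite::Connection;
use sql_support::ConnExt;

fn bump(conn: &Connection) -> rusqlite::Result<()> {
    let tx = conn.unchecked_transaction()?;
    tx.execute("UPDATE counters SET n = n + 1", [])?; // `tx` derefs to Connection
    // Skipping commit() (e.g. via an early `?` return above) triggers the
    // Drop rollback path shown in this hunk.
    tx.commit()?; // logs elapsed time through the new debug! shim
    Ok(())
}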

View File

@@ -30,6 +30,9 @@ pub use lazy::*;
pub use maybe_cached::*; pub use maybe_cached::*;
pub use repeat::*; pub use repeat::*;
// reexport logging helpers.
use error_support::{debug, info, warn};
/// In PRAGMA foo='bar', `'bar'` must be a constant string (it cannot be a /// In PRAGMA foo='bar', `'bar'` must be a constant string (it cannot be a
/// bound parameter), so we need to escape manually. According to /// bound parameter), so we need to escape manually. According to
/// <https://www.sqlite.org/faq.html>, the only character that must be escaped is /// <https://www.sqlite.org/faq.html>, the only character that must be escaped is

View File

@@ -41,6 +41,7 @@ use rusqlite::{
use thiserror::Error; use thiserror::Error;
use crate::ConnExt; use crate::ConnExt;
use crate::{debug, info, warn};
#[derive(Error, Debug)] #[derive(Error, Debug)]
pub enum Error { pub enum Error {
@@ -143,18 +144,18 @@ fn do_open_database_with_flags<CI: ConnectionInitializer, P: AsRef<Path>>(
connection_initializer: &CI, connection_initializer: &CI,
) -> Result<Connection> { ) -> Result<Connection> {
// Try running the migration logic with an existing file // Try running the migration logic with an existing file
log::debug!("{}: opening database", CI::NAME); debug!("{}: opening database", CI::NAME);
let mut conn = Connection::open_with_flags(path, open_flags)?; let mut conn = Connection::open_with_flags(path, open_flags)?;
log::debug!("{}: checking if initialization is necessary", CI::NAME); debug!("{}: checking if initialization is necessary", CI::NAME);
let db_empty = is_db_empty(&conn)?; let db_empty = is_db_empty(&conn)?;
log::debug!("{}: preparing", CI::NAME); debug!("{}: preparing", CI::NAME);
connection_initializer.prepare(&conn, db_empty)?; connection_initializer.prepare(&conn, db_empty)?;
if open_flags.contains(OpenFlags::SQLITE_OPEN_READ_WRITE) { if open_flags.contains(OpenFlags::SQLITE_OPEN_READ_WRITE) {
let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate)?; let tx = conn.transaction_with_behavior(TransactionBehavior::Immediate)?;
if db_empty { if db_empty {
log::debug!("{}: initializing new database", CI::NAME); debug!("{}: initializing new database", CI::NAME);
connection_initializer.init(&tx)?; connection_initializer.init(&tx)?;
} else { } else {
let mut current_version = get_schema_version(&tx)?; let mut current_version = get_schema_version(&tx)?;
@@ -162,7 +163,7 @@ fn do_open_database_with_flags<CI: ConnectionInitializer, P: AsRef<Path>>(
return Err(Error::IncompatibleVersion(current_version)); return Err(Error::IncompatibleVersion(current_version));
} }
while current_version < CI::END_VERSION { while current_version < CI::END_VERSION {
log::debug!( debug!(
"{}: upgrading database to {}", "{}: upgrading database to {}",
CI::NAME, CI::NAME,
current_version + 1 current_version + 1
@@ -171,7 +172,7 @@ fn do_open_database_with_flags<CI: ConnectionInitializer, P: AsRef<Path>>(
current_version += 1; current_version += 1;
} }
} }
log::debug!("{}: finishing writable database open", CI::NAME); debug!("{}: finishing writable database open", CI::NAME);
connection_initializer.finish(&tx)?; connection_initializer.finish(&tx)?;
set_schema_version(&tx, CI::END_VERSION)?; set_schema_version(&tx, CI::END_VERSION)?;
tx.commit()?; tx.commit()?;
@@ -183,10 +184,10 @@ fn do_open_database_with_flags<CI: ConnectionInitializer, P: AsRef<Path>>(
get_schema_version(&conn)? == CI::END_VERSION, get_schema_version(&conn)? == CI::END_VERSION,
"existing writer must have migrated" "existing writer must have migrated"
); );
log::debug!("{}: finishing readonly database open", CI::NAME); debug!("{}: finishing readonly database open", CI::NAME);
connection_initializer.finish(&conn)?; connection_initializer.finish(&conn)?;
} }
log::debug!("{}: database open successful", CI::NAME); debug!("{}: database open successful", CI::NAME);
Ok(conn) Ok(conn)
} }
@@ -211,15 +212,15 @@ fn try_handle_db_failure<CI: ConnectionInitializer, P: AsRef<Path>>(
if !open_flags.contains(OpenFlags::SQLITE_OPEN_CREATE) if !open_flags.contains(OpenFlags::SQLITE_OPEN_CREATE)
&& matches!(err, Error::SqlError(rusqlite::Error::SqliteFailure(code, _)) if code.code == rusqlite::ErrorCode::CannotOpen) && matches!(err, Error::SqlError(rusqlite::Error::SqliteFailure(code, _)) if code.code == rusqlite::ErrorCode::CannotOpen)
{ {
log::info!( info!(
"{}: database doesn't exist, but we weren't requested to create it", "{}: database doesn't exist, but we weren't requested to create it",
CI::NAME CI::NAME
); );
return Err(err); return Err(err);
} }
log::warn!("{}: database operation failed: {}", CI::NAME, err); warn!("{}: database operation failed: {}", CI::NAME, err);
if !open_flags.contains(OpenFlags::SQLITE_OPEN_READ_WRITE) { if !open_flags.contains(OpenFlags::SQLITE_OPEN_READ_WRITE) {
log::warn!( warn!(
"{}: not attempting recovery as this is a read-only connection request", "{}: not attempting recovery as this is a read-only connection request",
CI::NAME CI::NAME
); );
@@ -228,7 +229,7 @@ fn try_handle_db_failure<CI: ConnectionInitializer, P: AsRef<Path>>(
let delete = matches!(err, Error::Corrupt); let delete = matches!(err, Error::Corrupt);
if delete { if delete {
log::info!( info!(
"{}: the database is fatally damaged; deleting and starting fresh", "{}: the database is fatally damaged; deleting and starting fresh",
CI::NAME CI::NAME
); );
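For orientation, a minimal initializer this open/migrate flow would drive; the trait shape (`NAME`/`END_VERSION` plus `init` and `upgrade_from`, with `prepare`/`finish` left at their defaults) is assumed from sql-support's `open_database` module, and the schema is illustrative:

use rusqlite::Transaction;
use sql_support::open_database::{self, ConnectionInitializer};

struct DemoInit;

impl ConnectionInitializer for DemoInit {
    const NAME: &'static str = "demo";
    const END_VERSION: u32 = 2;

    fn init(&self, tx: &Transaction<'_>) -> open_database::Result<()> {
        // Runs when the database is empty, inside the immediate transaction.
        tx.execute_batch("CREATE TABLE demo(id INTEGER PRIMARY KEY);")?;
        Ok(())
    }

    fn upgrade_from(&self, tx: &Transaction<'_>, version: u32) -> open_database::Result<()> {
        // Invoked once per step by the `while current_version < END_VERSION` loop.
        match version {
            1 => Ok(tx.execute_batch("ALTER TABLE demo ADD COLUMN name TEXT;")?),
            v => Err(open_database::Error::IncompatibleVersion(v)),
        }
    }
}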

View File

@@ -1 +1 @@
{"files":{"Cargo.toml":"922b2e4d85f325dbef99d5e439558a75dbfda82e1d263c2dd3bfadac9879df18","README.md":"5e28baf874b643d756228bdab345e287bf107d3182dfe6a18aafadcc4b9a3fc9","benches/benchmark_all.rs":"5909dfb1e62793afb1f2bc15b75914527a4d14fce6796307c04a309e45c0598c","metrics.yaml":"0540ab2271aeab7f07335c7ceec12acde942995f9dcb3c29070489aa61899d56","src/benchmarks/README.md":"ccee8dbddba8762d0453fa855bd6984137b224b8c019f3dd8e86a3c303f51d71","src/benchmarks/client.rs":"e5897d4e2eda06809fa6dc6db4e780b9ef266f613fb113aa6613b83f7005dd0b","src/benchmarks/geoname.rs":"00fab05cf9465cf8e22e143cde75a81885411001b240af00efda4071975d0563","src/benchmarks/ingest.rs":"1f3b5eca704c51bc8f972e7a3492a518516461e5834f97a5f7d1855a048ab16b","src/benchmarks/mod.rs":"2c9a39b7a5144674d2475f4d7d69d77c4545f9aa5f123968cb32574e76f10b1a","src/benchmarks/query.rs":"d54946063e72cf98e7f46d94665c17c66af637774c2bb50cd5798dbe63d74f3c","src/bin/debug_ingestion_sizes.rs":"ce6e810be7b3fc19e826d75b622b82cfab5a1a99397a6d0833c2c4eebff2d364","src/config.rs":"0ca876e845841bb6429862c0904c82265003f53b55aea053fac60aed278586a7","src/db.rs":"c22aab621ae8c1b70595c2073e62ff766272400be13f393327c27451bce10498","src/error.rs":"e2ef3ec0e0b2b8ecbb8f2f1717d4cb753af06913b8395d086b7643098ad100a7","src/fakespot.rs":"f501c9fe5296e7c130a9fcb532b861465717652cb5ef688230bc7a3b94df91b1","src/geoname.rs":"77376dbc7d06532a7797a93b863f150317df7f31d9200d375c8ea489ac8bee6f","src/lib.rs":"a4c0989a01a7c13184049c1f11bc7813cd3cbfb6354fcca1f5a7204e45a0dc9c","src/metrics.rs":"871f0d834efbbc9e26d61f66fa31f0021dcf41444746cd7c082f93ba9628e399","src/pocket.rs":"1316668840ec9b4ea886223921dc9d3b5a1731d1a5206c0b1089f2a6c45c1b7b","src/provider.rs":"e85d606e98a8ba37557072f91c6906b1a4d7c5586a9913bf3570ef25106b007f","src/query.rs":"66f229272c9245eb8ee0cab237071627aec599f145f64da8894bcaeb1ed7c6f9","src/rs.rs":"3e2310d069b4cbc7447c2bb625f03bb49439b218a1e8f04190015a31cde22842","src/schema.rs":"68dbdc960097cc3421247cd9f705f6dcf74c9d357b37a5824b80e37837cbf053","src/store.rs":"76e6e2134d1d0e6f8dcf30ed65fe18eb093531bdddec461ad708b1eb4ac6a01c","src/suggestion.rs":"33dd2fb8e966a72f9843476bc006c8dfb1326ed1268ad88aa91801356f2623a1","src/testing/client.rs":"47a32fd84c733001f11e8bfff94dc8c060b6b0780346dca5ddc7a5f5489c1d85","src/testing/data.rs":"ad710b31532a9540491a73cba33a54db02e85dd5ec0a4f2260430f144c3d7380","src/testing/mod.rs":"fe930be25229517831111fb6d7796ae957ec0eb1b9a190c59cf538ac41ae27f5","src/util.rs":"52c6ec405637afa2d1a89f29fbbb7dcc341546b6deb97d326c4490bbf8713cb0","src/weather.rs":"7cc9167dcdfca49d6ad91eba6fba4d5fd49f45052f25a7fe3ad6749d3e6783fb","src/yelp.rs":"1fe3b7eb6b3f7462e9758b6eb62457dfa26f7549a8290cdff7637d2fb3ffea4f","uniffi.toml":"8205e4679ac26d53e70af0f85c013fd27cda1119f4322aebf5f2b9403d45a611"},"package":null} 
{"files":{"Cargo.toml":"57d202b48263d0ea4070807c66f26260497fb947f2099b7275a3e28463ad28ca","README.md":"5e28baf874b643d756228bdab345e287bf107d3182dfe6a18aafadcc4b9a3fc9","benches/benchmark_all.rs":"5909dfb1e62793afb1f2bc15b75914527a4d14fce6796307c04a309e45c0598c","metrics.yaml":"0540ab2271aeab7f07335c7ceec12acde942995f9dcb3c29070489aa61899d56","src/benchmarks/README.md":"ccee8dbddba8762d0453fa855bd6984137b224b8c019f3dd8e86a3c303f51d71","src/benchmarks/client.rs":"e5897d4e2eda06809fa6dc6db4e780b9ef266f613fb113aa6613b83f7005dd0b","src/benchmarks/geoname.rs":"fb4e8eb48e27879fe3f35d6d2415f0237b087b51b89ee03fb50fa635e1c1a3b5","src/benchmarks/ingest.rs":"1f3b5eca704c51bc8f972e7a3492a518516461e5834f97a5f7d1855a048ab16b","src/benchmarks/mod.rs":"2c9a39b7a5144674d2475f4d7d69d77c4545f9aa5f123968cb32574e76f10b1a","src/benchmarks/query.rs":"d54946063e72cf98e7f46d94665c17c66af637774c2bb50cd5798dbe63d74f3c","src/bin/debug_ingestion_sizes.rs":"ce6e810be7b3fc19e826d75b622b82cfab5a1a99397a6d0833c2c4eebff2d364","src/config.rs":"0ca876e845841bb6429862c0904c82265003f53b55aea053fac60aed278586a7","src/db.rs":"dbb2923192d062ee6f20e780ce5544afc999b01ab3eea81b46abe9e412750c4d","src/error.rs":"e2ef3ec0e0b2b8ecbb8f2f1717d4cb753af06913b8395d086b7643098ad100a7","src/fakespot.rs":"f501c9fe5296e7c130a9fcb532b861465717652cb5ef688230bc7a3b94df91b1","src/geoname.rs":"24d4e37f67ece92e5d268ecca22bdeef65a674db282530f248c1011c5ca77449","src/lib.rs":"3b8ccb1e93d214d27b7ef77326da40a0b85621f32ba143664f55e4fb2a665e7c","src/metrics.rs":"871f0d834efbbc9e26d61f66fa31f0021dcf41444746cd7c082f93ba9628e399","src/pocket.rs":"1316668840ec9b4ea886223921dc9d3b5a1731d1a5206c0b1089f2a6c45c1b7b","src/provider.rs":"6ec6b207eca1f4f9553fcb2d57d3a05d87279e81e09def5abdc5219ce838a4e9","src/query.rs":"66f229272c9245eb8ee0cab237071627aec599f145f64da8894bcaeb1ed7c6f9","src/rs.rs":"42b5a81c33df7dbabae5cb9756b7d947553545ae76e3dbd5eb3dcced3a31ce68","src/schema.rs":"a925dd09c609c2bc2338242a3f057e884e9add6478b15180f61bb53adda6574d","src/store.rs":"66d7f31b9d8713b11ecf389728beda3d9c5b0ea8402141b7ddf198372a1111f4","src/suggestion.rs":"6e902501624b62d9bde1c771eded87e7620540c691667d0ac913d8c21c183e9a","src/testing/client.rs":"47a32fd84c733001f11e8bfff94dc8c060b6b0780346dca5ddc7a5f5489c1d85","src/testing/data.rs":"ad710b31532a9540491a73cba33a54db02e85dd5ec0a4f2260430f144c3d7380","src/testing/mod.rs":"bd02746cdc6e2d54a7691b3ffbccb1a81b66b717871c34dd1a8b6088dc125185","src/util.rs":"52c6ec405637afa2d1a89f29fbbb7dcc341546b6deb97d326c4490bbf8713cb0","src/weather.rs":"c3ab5e8a4e3dd7be0a4bb37848db0b6295050d151ade9c935a7fb4c7e12a1367","src/yelp.rs":"1fe3b7eb6b3f7462e9758b6eb62457dfa26f7549a8290cdff7637d2fb3ffea4f","uniffi.toml":"8205e4679ac26d53e70af0f85c013fd27cda1119f4322aebf5f2b9403d45a611"},"package":null}

View File

@@ -54,12 +54,13 @@ required-features = ["benchmark_api"]
anyhow = "1.0" anyhow = "1.0"
chrono = "0.4" chrono = "0.4"
extend = "1.1" extend = "1.1"
log = "0.4"
once_cell = "1.5" once_cell = "1.5"
parking_lot = ">=0.11,<=0.12" parking_lot = ">=0.11,<=0.12"
rmp-serde = "1.3" rmp-serde = "1.3"
serde_json = "1" serde_json = "1"
thiserror = "1" thiserror = "1"
unicase = "2.6"
unicode-normalization = "0.1"
[dependencies.error-support] [dependencies.error-support]
path = "../support/error" path = "../support/error"
@@ -76,6 +77,7 @@ features = [
"functions", "functions",
"bundled", "bundled",
"load_extension", "load_extension",
"collation",
] ]
[dependencies.serde] [dependencies.serde]
@@ -107,10 +109,11 @@ optional = true
criterion = "0.5" criterion = "0.5"
expect-test = "1.4" expect-test = "1.4"
hex = "0.4" hex = "0.4"
itertools = "0.14"
[dev-dependencies.env_logger] [dev-dependencies.error-support]
version = "0.10" path = "../support/error"
default-features = false features = ["testing"]
[dev-dependencies.rc_crypto] [dev-dependencies.rc_crypto]
path = "../support/rc_crypto" path = "../support/rc_crypto"

View File

@@ -17,7 +17,6 @@ pub struct GeonameBenchmark {
pub struct FetchGeonamesArgs { pub struct FetchGeonamesArgs {
query: &'static str, query: &'static str,
match_name_prefix: bool, match_name_prefix: bool,
geoname_type: Option<GeonameType>,
filter: Option<Vec<Geoname>>, filter: Option<Vec<Geoname>>,
} }
@@ -49,7 +48,6 @@ impl BenchmarkWithInput for GeonameBenchmark {
.fetch_geonames( .fetch_geonames(
i_input.fetch_args.query, i_input.fetch_args.query,
i_input.fetch_args.match_name_prefix, i_input.fetch_args.match_name_prefix,
i_input.fetch_args.geoname_type,
i_input.fetch_args.filter, i_input.fetch_args.filter,
) )
.unwrap_or_else(|e| panic!("Error fetching geonames: {e}")); .unwrap_or_else(|e| panic!("Error fetching geonames: {e}"));
@@ -67,13 +65,16 @@ impl BenchmarkWithInput for GeonameBenchmark {
pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> { pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
let ny_state = Geoname { let ny_state = Geoname {
geoname_id: 8, geoname_id: 5128638,
geoname_type: GeonameType::AdminDivision { level: 1 },
name: "New York".to_string(), name: "New York".to_string(),
latitude: 43.00035, feature_class: "A".to_string(),
longitude: -75.4999, feature_code: "ADM1".to_string(),
country_code: "US".to_string(), country_code: "US".to_string(),
admin1_code: "NY".to_string(), admin_division_codes: [(1, "NY".to_string())].into(),
population: 19274244, population: 19274244,
latitude: "43.00035".to_string(),
longitude: "-75.4999".to_string(),
}; };
vec![ vec![
@@ -84,7 +85,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "nomatch", query: "nomatch",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -96,7 +96,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "no match", query: "no match",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -108,7 +107,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "no match either", query: "no match either",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -120,7 +118,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "this is a very long string that does not match anything in the geonames database but it sure is very long", query: "this is a very long string that does not match anything in the geonames database but it sure is very long",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -134,7 +131,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "nomatch", query: "nomatch",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -146,7 +142,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "no match", query: "no match",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -158,7 +153,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "no match either", query: "no match either",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -170,7 +164,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "this is a very long string that does not match anything in the geonames database but it sure is very long", query: "this is a very long string that does not match anything in the geonames database but it sure is very long",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: false, should_match: false,
@@ -184,7 +177,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "ny", query: "ny",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -196,7 +188,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "nyc", query: "nyc",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -208,7 +199,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "ca", query: "ca",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -220,7 +210,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "pdx", query: "pdx",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -232,7 +221,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "roc", query: "roc",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -246,7 +234,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "ny", query: "ny",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -258,7 +245,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "nyc", query: "nyc",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -270,7 +256,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "ca", query: "ca",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -282,7 +267,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "pdx", query: "pdx",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -294,7 +278,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "roc", query: "roc",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -308,7 +291,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "new york", query: "new york",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -320,7 +302,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "rochester", query: "rochester",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -334,7 +315,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "new york", query: "new york",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -346,33 +326,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "rochester", query: "rochester",
match_name_prefix: true, match_name_prefix: true,
geoname_type: None,
filter: None,
},
should_match: true,
}
),
// restricting to a geoname type
(
"geoname-fetch-type-city-ny",
GeonameBenchmark {
args: FetchGeonamesArgs {
query: "ny",
match_name_prefix: false,
geoname_type: Some(GeonameType::City),
filter: None,
},
should_match: true,
}
),
(
"geoname-fetch-type-region-ny",
GeonameBenchmark {
args: FetchGeonamesArgs {
query: "ny",
match_name_prefix: false,
geoname_type: Some(GeonameType::Region),
filter: None, filter: None,
}, },
should_match: true, should_match: true,
@@ -386,59 +339,6 @@ pub fn all_benchmarks() -> Vec<(&'static str, GeonameBenchmark)> {
args: FetchGeonamesArgs { args: FetchGeonamesArgs {
query: "ny", query: "ny",
match_name_prefix: false, match_name_prefix: false,
geoname_type: None,
filter: Some(vec![ny_state.clone()]),
},
should_match: true,
}
),
// restricting to a geoname type + filtering
(
"geoname-fetch-type-filter-city-ny",
GeonameBenchmark {
args: FetchGeonamesArgs {
query: "ny",
match_name_prefix: false,
geoname_type: Some(GeonameType::City),
filter: Some(vec![ny_state.clone()]),
},
should_match: true,
}
),
(
"geoname-fetch-type-filter-region-ny",
GeonameBenchmark {
args: FetchGeonamesArgs {
query: "ny",
match_name_prefix: false,
geoname_type: Some(GeonameType::Region),
filter: Some(vec![ny_state.clone()]),
},
should_match: true,
}
),
// restricting to a geoname type + filtering w/ prefix matching
(
"geoname-fetch-type-filter-prefix-city-ny",
GeonameBenchmark {
args: FetchGeonamesArgs {
query: "ny",
match_name_prefix: true,
geoname_type: Some(GeonameType::City),
filter: Some(vec![ny_state.clone()]),
},
should_match: true,
}
),
(
"geoname-fetch-type-filter-prefix-region-ny",
GeonameBenchmark {
args: FetchGeonamesArgs {
query: "ny",
match_name_prefix: true,
geoname_type: Some(GeonameType::Region),
filter: Some(vec![ny_state.clone()]), filter: Some(vec![ny_state.clone()]),
}, },
should_match: true, should_match: true,

View File

@@ -26,6 +26,7 @@ use crate::{
DownloadedAmoSuggestion, DownloadedAmpSuggestion, DownloadedDynamicRecord, DownloadedAmoSuggestion, DownloadedAmpSuggestion, DownloadedDynamicRecord,
DownloadedDynamicSuggestion, DownloadedFakespotSuggestion, DownloadedMdnSuggestion, DownloadedDynamicSuggestion, DownloadedFakespotSuggestion, DownloadedMdnSuggestion,
DownloadedPocketSuggestion, DownloadedWikipediaSuggestion, Record, SuggestRecordId, DownloadedPocketSuggestion, DownloadedWikipediaSuggestion, Record, SuggestRecordId,
SuggestRecordType,
}, },
schema::{clear_database, SuggestConnectionInitializer}, schema::{clear_database, SuggestConnectionInitializer},
suggestion::{cook_raw_suggestion_url, FtsMatchInfo, Suggestion}, suggestion::{cook_raw_suggestion_url, FtsMatchInfo, Suggestion},
@@ -1365,6 +1366,11 @@ impl<'a> SuggestDao<'a> {
named_params! { ":record_id": record_id.as_str() }, named_params! { ":record_id": record_id.as_str() },
)?; )?;
self.scope.err_if_interrupted()?; self.scope.err_if_interrupted()?;
self.conn.execute_cached(
"DELETE FROM geonames_alternates WHERE record_id = :record_id",
named_params! { ":record_id": record_id.as_str() },
)?;
self.scope.err_if_interrupted()?;
self.conn.execute_cached( self.conn.execute_cached(
"DELETE FROM geonames_metrics WHERE record_id = :record_id", "DELETE FROM geonames_metrics WHERE record_id = :record_id",
named_params! { ":record_id": record_id.as_str() }, named_params! { ":record_id": record_id.as_str() },
@@ -1446,6 +1452,32 @@ impl<'a> SuggestDao<'a> {
self.get_meta::<String>(&provider_config_meta_key(provider))? self.get_meta::<String>(&provider_config_meta_key(provider))?
.map_or_else(|| Ok(None), |json| Ok(serde_json::from_str(&json)?)) .map_or_else(|| Ok(None), |json| Ok(serde_json::from_str(&json)?))
} }
/// Gets keywords metrics for a record type.
pub fn get_keywords_metrics(&self, record_type: SuggestRecordType) -> Result<KeywordsMetrics> {
let data = self.conn.try_query_row(
r#"
SELECT
max(max_len) AS len,
max(max_word_count) AS word_count
FROM
keywords_metrics
WHERE
record_type = :record_type
"#,
named_params! {
":record_type": record_type,
},
|row| -> Result<(usize, usize)> { Ok((row.get("len")?, row.get("word_count")?)) },
true, // cache
)?;
Ok(data
.map(|(max_len, max_word_count)| KeywordsMetrics {
max_len,
max_word_count,
})
.unwrap_or_default())
}
} }
#[derive(Debug, PartialEq, Eq, Hash)] #[derive(Debug, PartialEq, Eq, Hash)]
@@ -1839,32 +1871,68 @@ impl<'conn> PrefixKeywordInsertStatement<'conn> {
} }
} }
pub(crate) struct KeywordMetricsInsertStatement<'conn>(rusqlite::Statement<'conn>); #[derive(Debug, Default)]
pub(crate) struct KeywordsMetrics {
pub(crate) max_len: usize,
pub(crate) max_word_count: usize,
}
impl<'conn> KeywordMetricsInsertStatement<'conn> { /// This can be used to update metrics as keywords are inserted into the DB.
pub(crate) fn new(conn: &'conn Connection) -> Result<Self> { /// Create a `KeywordsMetricsUpdater`, call `update` on it as each keyword is
Ok(Self(conn.prepare( /// inserted, and then call `finish` after all keywords have been inserted.
"INSERT INTO keywords_metrics( pub(crate) struct KeywordsMetricsUpdater {
record_id, pub(crate) max_len: usize,
provider, pub(crate) max_word_count: usize,
max_length, }
max_word_count
) impl KeywordsMetricsUpdater {
VALUES(?, ?, ?, ?) pub(crate) fn new() -> Self {
", Self {
)?)) max_len: 0,
max_word_count: 0,
}
} }
pub(crate) fn execute( pub(crate) fn update(&mut self, keyword: &str) {
&mut self, self.max_len = std::cmp::max(self.max_len, keyword.len());
self.max_word_count =
std::cmp::max(self.max_word_count, keyword.split_whitespace().count());
}
/// Inserts keywords metrics into the database. This assumes you have a
/// cache object inside the `cache` cell that caches the metrics. It will be
/// cleared since the metrics update invalidates it.
pub(crate) fn finish<T>(
&self,
conn: &Connection,
record_id: &SuggestRecordId, record_id: &SuggestRecordId,
provider: SuggestionProvider, record_type: SuggestRecordType,
max_len: usize, cache: &mut OnceCell<T>,
max_word_count: usize,
) -> Result<()> { ) -> Result<()> {
self.0 let mut insert_stmt = conn.prepare(
.execute((record_id.as_str(), provider, max_len, max_word_count)) r#"
.with_context("keyword metrics insert")?; INSERT OR REPLACE INTO keywords_metrics(
record_id,
record_type,
max_len,
max_word_count
)
VALUES(?, ?, ?, ?)
"#,
)?;
insert_stmt
.execute((
record_id.as_str(),
record_type,
self.max_len,
self.max_word_count,
))
.with_context("keywords metrics insert")?;
// We just made some insertions that might invalidate the data in the
// cache. Clear it so it's repopulated the next time it's accessed.
cache.take();
Ok(()) Ok(())
} }
} }
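A sketch of the intended updater lifecycle inside this crate (the insert step is elided, and the cache cell stands in for the dao's real field; `once_cell` is already a dependency per the Cargo.toml change above):

use once_cell::unsync::OnceCell;
use rusqlite::Connection;

fn ingest_keywords(
    conn: &Connection,
    record_id: &SuggestRecordId,
    keywords: &[&str],
    metrics_cache: &mut OnceCell<KeywordsMetrics>,
) -> Result<()> {
    let mut updater = KeywordsMetricsUpdater::new();
    for kw in keywords.iter().copied() {
        // ... insert `kw` into the keywords table here ...
        updater.update(kw); // tracks max_len / max_word_count incrementally
    }
    // One INSERT OR REPLACE into keywords_metrics, after which the cache cell
    // is cleared so get_keywords_metrics() sees the fresh row on next access.
    updater.finish(conn, record_id, SuggestRecordType::Weather, metrics_cache)
}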

File diff suppressed because it is too large

View File

@@ -26,7 +26,7 @@ mod yelp;
pub use config::{SuggestGlobalConfig, SuggestProviderConfig}; pub use config::{SuggestGlobalConfig, SuggestProviderConfig};
pub use error::{Error, SuggestApiError}; pub use error::{Error, SuggestApiError};
pub use geoname::{Geoname, GeonameMatch, GeonameType}; pub use geoname::{Geoname, GeonameMatch};
pub use metrics::{LabeledTimingSample, SuggestIngestionMetrics}; pub use metrics::{LabeledTimingSample, SuggestIngestionMetrics};
pub use provider::{AmpMatchingStrategy, SuggestionProvider, SuggestionProviderConstraints}; pub use provider::{AmpMatchingStrategy, SuggestionProvider, SuggestionProviderConstraints};
pub use query::{QueryWithMetricsResult, SuggestionQuery}; pub use query::{QueryWithMetricsResult, SuggestionQuery};

View File

@@ -140,11 +140,18 @@ impl SuggestionProvider {
)])), )])),
Self::Yelp => Some(HashMap::from([( Self::Yelp => Some(HashMap::from([(
Collection::Other, Collection::Other,
HashSet::from([SuggestRecordType::Icon, SuggestRecordType::Geonames]), HashSet::from([
SuggestRecordType::Icon,
SuggestRecordType::Geonames,
SuggestRecordType::GeonamesAlternates,
]),
)])), )])),
Self::Weather => Some(HashMap::from([( Self::Weather => Some(HashMap::from([(
Collection::Other, Collection::Other,
HashSet::from([SuggestRecordType::Geonames]), HashSet::from([
SuggestRecordType::Geonames,
SuggestRecordType::GeonamesAlternates,
]),
)])), )])),
Self::Fakespot => Some(HashMap::from([( Self::Fakespot => Some(HashMap::from([(
Collection::Fakespot, Collection::Fakespot,

View File

@@ -37,7 +37,7 @@ use remote_settings::{
Attachment, RemoteSettingsClient, RemoteSettingsError, RemoteSettingsRecord, Attachment, RemoteSettingsClient, RemoteSettingsError, RemoteSettingsRecord,
RemoteSettingsService, RemoteSettingsService,
}; };
use serde::{Deserialize, Deserializer, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::{Map, Value}; use serde_json::{Map, Value};
use crate::{error::Error, query::full_keywords_to_fts_content, Result}; use crate::{error::Error, query::full_keywords_to_fts_content, Result};
@@ -200,8 +200,10 @@ pub(crate) enum SuggestRecord {
Fakespot, Fakespot,
#[serde(rename = "dynamic-suggestions")] #[serde(rename = "dynamic-suggestions")]
Dynamic(DownloadedDynamicRecord), Dynamic(DownloadedDynamicRecord),
#[serde(rename = "geonames")] #[serde(rename = "geonames-2")] // version 2
Geonames, Geonames,
#[serde(rename = "geonames-alternates")]
GeonamesAlternates,
} }
impl SuggestRecord { impl SuggestRecord {
@@ -230,6 +232,7 @@ pub enum SuggestRecordType {
Fakespot, Fakespot,
Dynamic, Dynamic,
Geonames, Geonames,
GeonamesAlternates,
} }
impl From<&SuggestRecord> for SuggestRecordType { impl From<&SuggestRecord> for SuggestRecordType {
@@ -247,6 +250,7 @@ impl From<&SuggestRecord> for SuggestRecordType {
SuggestRecord::Fakespot => Self::Fakespot, SuggestRecord::Fakespot => Self::Fakespot,
SuggestRecord::Dynamic(_) => Self::Dynamic, SuggestRecord::Dynamic(_) => Self::Dynamic,
SuggestRecord::Geonames => Self::Geonames, SuggestRecord::Geonames => Self::Geonames,
SuggestRecord::GeonamesAlternates => Self::GeonamesAlternates,
} }
} }
} }
@@ -257,6 +261,12 @@ impl fmt::Display for SuggestRecordType {
} }
} }
impl ToSql for SuggestRecordType {
fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> {
Ok(ToSqlOutput::from(self.as_str()))
}
}
impl SuggestRecordType { impl SuggestRecordType {
/// Get all record types to iterate over /// Get all record types to iterate over
/// ///
@@ -276,6 +286,7 @@ impl SuggestRecordType {
Self::Fakespot, Self::Fakespot,
Self::Dynamic, Self::Dynamic,
Self::Geonames, Self::Geonames,
Self::GeonamesAlternates,
] ]
} }
@@ -292,7 +303,8 @@ impl SuggestRecordType {
Self::GlobalConfig => "configuration", Self::GlobalConfig => "configuration",
Self::Fakespot => "fakespot-suggestions", Self::Fakespot => "fakespot-suggestions",
Self::Dynamic => "dynamic-suggestions", Self::Dynamic => "dynamic-suggestions",
Self::Geonames => "geonames", Self::Geonames => "geonames-2",
Self::GeonamesAlternates => "geonames-alternates",
} }
} }
} }
@@ -608,15 +620,6 @@ pub(crate) struct DownloadedGlobalConfigInner {
pub show_less_frequently_cap: i32, pub show_less_frequently_cap: i32,
} }
pub(crate) fn deserialize_f64_or_default<'de, D>(
deserializer: D,
) -> std::result::Result<f64, D::Error>
where
D: Deserializer<'de>,
{
String::deserialize(deserializer).map(|s| s.parse().ok().unwrap_or_default())
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;

View File

@@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ */
use crate::db::Sqlite3Extension; use crate::{db::Sqlite3Extension, geoname::geonames_collate};
use rusqlite::{Connection, Transaction}; use rusqlite::{Connection, Transaction};
use sql_support::{ use sql_support::{
open_database::{self, ConnectionInitializer}, open_database::{self, ConnectionInitializer},
@@ -23,7 +23,7 @@ use sql_support::{
/// `clear_database()` by adding their names to `conditional_tables`, unless /// `clear_database()` by adding their names to `conditional_tables`, unless
/// they are cleared via a deletion trigger or there's some other good /// they are cleared via a deletion trigger or there's some other good
/// reason not to do so. /// reason not to do so.
pub const VERSION: u32 = 38; pub const VERSION: u32 = 41;
/// The current Suggest database schema. /// The current Suggest database schema.
pub const SQL: &str = " pub const SQL: &str = "
@@ -48,17 +48,17 @@ CREATE TABLE keywords(
PRIMARY KEY (keyword, suggestion_id) PRIMARY KEY (keyword, suggestion_id)
) WITHOUT ROWID; ) WITHOUT ROWID;
-- Metrics for the `keywords` table per provider. Not all providers use or -- Keywords metrics per record ID and type. Currently we only record metrics for
-- update it. If you modify an existing provider to use this, you will need to -- a small number of record types.
-- populate this table somehow with metrics for the provider's existing
-- keywords, for example as part of a schema migration.
CREATE TABLE keywords_metrics( CREATE TABLE keywords_metrics(
record_id TEXT NOT NULL PRIMARY KEY, record_id TEXT NOT NULL PRIMARY KEY,
provider INTEGER NOT NULL, record_type TEXT NOT NULL,
max_length INTEGER NOT NULL, max_len INTEGER NOT NULL,
max_word_count INTEGER NOT NULL max_word_count INTEGER NOT NULL
) WITHOUT ROWID; ) WITHOUT ROWID;
CREATE INDEX keywords_metrics_record_type ON keywords_metrics(record_type);
-- full keywords are what we display to the user when a (partial) keyword matches -- full keywords are what we display to the user when a (partial) keyword matches
CREATE TABLE full_keywords( CREATE TABLE full_keywords(
id INTEGER PRIMARY KEY, id INTEGER PRIMARY KEY,
@@ -196,28 +196,44 @@ CREATE TABLE geonames(
id INTEGER PRIMARY KEY, id INTEGER PRIMARY KEY,
record_id TEXT NOT NULL, record_id TEXT NOT NULL,
name TEXT NOT NULL, name TEXT NOT NULL,
latitude REAL NOT NULL,
longitude REAL NOT NULL,
feature_class TEXT NOT NULL, feature_class TEXT NOT NULL,
feature_code TEXT NOT NULL, feature_code TEXT NOT NULL,
country_code TEXT NOT NULL, country_code TEXT NOT NULL,
admin1_code TEXT NOT NULL, admin1_code TEXT,
population INTEGER NOT NULL admin2_code TEXT,
admin3_code TEXT,
admin4_code TEXT,
population INTEGER,
latitude TEXT,
longitude TEXT
); );
CREATE INDEX geonames_feature_class ON geonames(feature_class);
CREATE INDEX geonames_feature_code ON geonames(feature_code);
-- `language` is a lowercase ISO 639 code: 'en', 'de', 'fr', etc. It can also be
-- a geonames pseudo-language like 'abbr' for abbreviations and 'iata' for
-- airport codes. It will be null for names derived from a geoname's primary
-- name (see `Geoname::name` and `Geoname::ascii_name`).
-- `geoname_id` is not defined as a foreign key because the main geonames
-- records are not guaranteed to be ingested before alternates records.
CREATE TABLE geonames_alternates( CREATE TABLE geonames_alternates(
name TEXT NOT NULL, id INTEGER PRIMARY KEY,
record_id TEXT NOT NULL,
geoname_id INTEGER NOT NULL, geoname_id INTEGER NOT NULL,
-- The value of the `iso_language` field for the alternate. This will be language TEXT,
-- null for the alternate we artificially create for the `name` in the name TEXT NOT NULL COLLATE geonames_collate,
-- corresponding geoname record. is_preferred INTEGER,
iso_language TEXT, is_short INTEGER
PRIMARY KEY (name, geoname_id), );
FOREIGN KEY(geoname_id) REFERENCES geonames(id) ON DELETE CASCADE
) WITHOUT ROWID; CREATE INDEX geonames_alternates_geoname_id_language
CREATE INDEX geonames_alternates_geoname_id ON geonames_alternates(geoname_id); ON geonames_alternates(geoname_id, language);
CREATE INDEX geonames_alternates_name
ON geonames_alternates(name);
CREATE TRIGGER geonames_alternates_delete AFTER DELETE ON geonames BEGIN
DELETE FROM geonames_alternates
WHERE geoname_id = old.id;
END;
CREATE TABLE geonames_metrics( CREATE TABLE geonames_metrics(
record_id TEXT NOT NULL PRIMARY KEY, record_id TEXT NOT NULL PRIMARY KEY,
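
A runnable sketch (plain rusqlite, custom collation omitted) of the trigger-based cleanup above: because `geoname_id` is deliberately not a foreign key, the AFTER DELETE trigger is what keeps `geonames_alternates` consistent when a geoname is removed. Table shapes are simplified.

use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE geonames(id INTEGER PRIMARY KEY, name TEXT NOT NULL);
         CREATE TABLE geonames_alternates(
             id INTEGER PRIMARY KEY,
             geoname_id INTEGER NOT NULL, -- no FOREIGN KEY on purpose
             name TEXT NOT NULL
         );
         CREATE TRIGGER geonames_alternates_delete AFTER DELETE ON geonames BEGIN
             DELETE FROM geonames_alternates WHERE geoname_id = old.id;
         END;
         INSERT INTO geonames VALUES (1, 'New York City');
         INSERT INTO geonames_alternates VALUES (1, 1, 'NYC'), (2, 1, 'New York');
         DELETE FROM geonames WHERE id = 1;",
    )?;
    let left: i64 =
        conn.query_row("SELECT count(*) FROM geonames_alternates", [], |r| r.get(0))?;
    assert_eq!(left, 0); // the trigger removed the orphaned alternates
    Ok(())
}
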
@@ -255,6 +271,11 @@ impl<'a> SuggestConnectionInitializer<'a> {
} }
Ok(()) Ok(())
} }
fn create_custom_functions(&self, conn: &Connection) -> open_database::Result<()> {
conn.create_collation("geonames_collate", geonames_collate)?;
Ok(())
}
} }
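
A minimal sketch of the per-connection collation registration above, assuming rusqlite built with its `collation` feature. The stand-in comparator is hypothetical; the real `geonames_collate` lives in `geoname.rs`.

use rusqlite::Connection;
use std::cmp::Ordering;

// Hypothetical comparator: case-insensitive comparison, standing in for the
// real geonames collation.
fn demo_collate(a: &str, b: &str) -> Ordering {
    a.to_lowercase().cmp(&b.to_lowercase())
}

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    // Collations are per connection, which is why `upgrade_from` has to
    // re-register them before running migrations.
    conn.create_collation("geonames_collate", demo_collate)?;
    conn.execute_batch(
        "CREATE TABLE t(name TEXT NOT NULL COLLATE geonames_collate);
         INSERT INTO t(name) VALUES ('München'), ('münchen');",
    )?;
    let n: i64 = conn.query_row(
        "SELECT count(*) FROM t WHERE name = 'MÜNCHEN'",
        [],
        |row| row.get(0),
    )?;
    assert_eq!(n, 2); // both rows compare equal under the custom collation
    Ok(())
}
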
impl ConnectionInitializer for SuggestConnectionInitializer<'_> { impl ConnectionInitializer for SuggestConnectionInitializer<'_> {
@@ -266,7 +287,7 @@ impl ConnectionInitializer for SuggestConnectionInitializer<'_> {
sql_support::setup_sqlite_defaults(conn)?; sql_support::setup_sqlite_defaults(conn)?;
conn.execute("PRAGMA foreign_keys = ON", ())?; conn.execute("PRAGMA foreign_keys = ON", ())?;
sql_support::debug_tools::define_debug_functions(conn)?; sql_support::debug_tools::define_debug_functions(conn)?;
self.create_custom_functions(conn)?;
Ok(()) Ok(())
} }
@@ -276,6 +297,10 @@ impl ConnectionInitializer for SuggestConnectionInitializer<'_> {
} }
fn upgrade_from(&self, tx: &Transaction<'_>, version: u32) -> open_database::Result<()> { fn upgrade_from(&self, tx: &Transaction<'_>, version: u32) -> open_database::Result<()> {
// Custom functions are per connection. `prepare` usually handles
// creating them but on upgrade it's not called before this method is.
self.create_custom_functions(tx)?;
match version { match version {
1..=15 => { 1..=15 => {
// Treat databases with these older schema versions as corrupt, // Treat databases with these older schema versions as corrupt,
@@ -662,6 +687,94 @@ impl ConnectionInitializer for SuggestConnectionInitializer<'_> {
)?; )?;
Ok(()) Ok(())
} }
38 => {
// This migration makes changes to geonames.
tx.execute_batch(
r#"
DROP INDEX geonames_alternates_geoname_id;
DROP TABLE geonames_alternates;
DROP INDEX geonames_feature_class;
DROP INDEX geonames_feature_code;
DROP TABLE geonames;
CREATE TABLE geonames(
id INTEGER PRIMARY KEY,
record_id TEXT NOT NULL,
name TEXT NOT NULL,
feature_class TEXT NOT NULL,
feature_code TEXT NOT NULL,
country_code TEXT NOT NULL,
admin1_code TEXT,
admin2_code TEXT,
admin3_code TEXT,
admin4_code TEXT,
population INTEGER,
latitude TEXT,
longitude TEXT
);
CREATE TABLE geonames_alternates(
record_id TEXT NOT NULL,
geoname_id INTEGER NOT NULL,
language TEXT,
name TEXT NOT NULL COLLATE geonames_collate,
PRIMARY KEY(geoname_id, language, name)
);
CREATE INDEX geonames_alternates_geoname_id ON geonames_alternates(geoname_id);
CREATE INDEX geonames_alternates_name ON geonames_alternates(name);
CREATE TRIGGER geonames_alternates_delete AFTER DELETE ON geonames BEGIN
DELETE FROM geonames_alternates
WHERE geoname_id = old.id;
END;
"#,
)?;
Ok(())
}
39 => {
// This migration makes changes to keywords metrics.
clear_database(tx)?;
tx.execute_batch(
r#"
DROP TABLE keywords_metrics;
CREATE TABLE keywords_metrics(
record_id TEXT NOT NULL PRIMARY KEY,
record_type TEXT NOT NULL,
max_len INTEGER NOT NULL,
max_word_count INTEGER NOT NULL
) WITHOUT ROWID;
CREATE INDEX keywords_metrics_record_type ON keywords_metrics(record_type);
"#,
)?;
Ok(())
}
40 => {
// This migration makes changes to geonames.
clear_database(tx)?;
tx.execute_batch(
r#"
DROP INDEX geonames_alternates_geoname_id;
DROP INDEX geonames_alternates_name;
DROP TABLE geonames_alternates;
CREATE TABLE geonames_alternates(
id INTEGER PRIMARY KEY,
record_id TEXT NOT NULL,
geoname_id INTEGER NOT NULL,
language TEXT,
name TEXT NOT NULL COLLATE geonames_collate,
is_preferred INTEGER,
is_short INTEGER
);
CREATE INDEX geonames_alternates_geoname_id_language
ON geonames_alternates(geoname_id, language);
CREATE INDEX geonames_alternates_name
ON geonames_alternates(name);
"#,
)?;
Ok(())
}
_ => Err(open_database::Error::IncompatibleVersion(version)), _ => Err(open_database::Error::IncompatibleVersion(version)),
} }
} }
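
The new 38, 39, and 40 arms follow the crate's drop-and-recreate migration style. As a rough, self-contained sketch of the machinery around them (not `sql_support`'s actual implementation), the versioned loop looks roughly like this, with hypothetical DDL in each arm:

use rusqlite::Connection;

fn migrate(conn: &mut Connection, target: u32) -> rusqlite::Result<()> {
    loop {
        let version: u32 =
            conn.query_row("PRAGMA user_version", [], |row| row.get(0))?;
        if version >= target {
            return Ok(());
        }
        // One migration per version, applied inside a transaction.
        let tx = conn.transaction()?;
        match version {
            0 => tx.execute_batch("CREATE TABLE geonames(id INTEGER PRIMARY KEY);")?,
            1 => tx.execute_batch("ALTER TABLE geonames ADD COLUMN name TEXT;")?,
            _ => unreachable!("no migration registered for version {version}"),
        }
        tx.execute_batch(&format!("PRAGMA user_version = {}", version + 1))?;
        tx.commit()?;
    }
}

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;
    migrate(&mut conn, 2)?;
    Ok(())
}
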


@@ -9,7 +9,7 @@ use std::{
sync::Arc, sync::Arc,
}; };
use error_support::{breadcrumb, handle_error}; use error_support::{breadcrumb, handle_error, trace};
use once_cell::sync::OnceCell; use once_cell::sync::OnceCell;
use parking_lot::Mutex; use parking_lot::Mutex;
use remote_settings::{self, RemoteSettingsError, RemoteSettingsServer, RemoteSettingsService}; use remote_settings::{self, RemoteSettingsError, RemoteSettingsServer, RemoteSettingsService};
@@ -20,7 +20,7 @@ use crate::{
config::{SuggestGlobalConfig, SuggestProviderConfig}, config::{SuggestGlobalConfig, SuggestProviderConfig},
db::{ConnectionType, IngestedRecord, Sqlite3Extension, SuggestDao, SuggestDb}, db::{ConnectionType, IngestedRecord, Sqlite3Extension, SuggestDao, SuggestDb},
error::Error, error::Error,
geoname::{Geoname, GeonameMatch, GeonameType}, geoname::{Geoname, GeonameAlternates, GeonameMatch},
metrics::{MetricsContext, SuggestIngestionMetrics, SuggestQueryMetrics}, metrics::{MetricsContext, SuggestIngestionMetrics, SuggestQueryMetrics},
provider::{SuggestionProvider, SuggestionProviderConstraints, DEFAULT_INGEST_PROVIDERS}, provider::{SuggestionProvider, SuggestionProviderConstraints, DEFAULT_INGEST_PROVIDERS},
rs::{ rs::{
@@ -306,36 +306,26 @@ impl SuggestStore {
/// Fetches geonames stored in the database. A geoname represents a /// Fetches geonames stored in the database. A geoname represents a
/// geographic place. /// geographic place.
/// ///
/// `query` is a string that will be matched directly against geoname names. /// See `fetch_geonames` in `geoname.rs` for documentation.
/// It is not a query string in the usual Suggest sense. `match_name_prefix`
/// determines whether prefix matching is performed on names excluding
/// abbreviations and airport codes. When `true`, names that start with
/// `query` will match. When false, names that equal `query` will match.
///
/// `geoname_type` restricts returned geonames to a [`GeonameType`].
///
/// `filter` restricts returned geonames to certain cities or regions.
/// Cities can be restricted to regions by including the regions in
/// `filter`, and regions can be restricted to those containing certain
/// cities by including the cities in `filter`. This is especially useful
/// since city and region names are not unique. `filter` is disjunctive: If
/// any item in `filter` matches a geoname, the geoname will be filtered in.
///
/// The query can match a single geoname in more than one way. For example,
/// it can match both a full name and an abbreviation. The returned vec of
/// [`GeonameMatch`] values will include all matches for a geoname, one
/// match per `match_type` per geoname. In other words, a matched geoname
/// can map to more than one `GeonameMatch`.
#[handle_error(Error)] #[handle_error(Error)]
pub fn fetch_geonames( pub fn fetch_geonames(
&self, &self,
query: &str, query: &str,
match_name_prefix: bool, match_name_prefix: bool,
geoname_type: Option<GeonameType>,
filter: Option<Vec<Geoname>>, filter: Option<Vec<Geoname>>,
) -> SuggestApiResult<Vec<GeonameMatch>> { ) -> SuggestApiResult<Vec<GeonameMatch>> {
self.inner self.inner.fetch_geonames(query, match_name_prefix, filter)
.fetch_geonames(query, match_name_prefix, geoname_type, filter) }
/// Fetches a geoname's names stored in the database.
///
/// See `fetch_geoname_alternates` in `geoname.rs` for documentation.
#[handle_error(Error)]
pub fn fetch_geoname_alternates(
&self,
geoname: &Geoname,
) -> SuggestApiResult<GeonameAlternates> {
self.inner.fetch_geoname_alternates(geoname)
} }
} }
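
A sketch of the reworked public surface from a consumer's point of view, assuming the vendored `suggest` crate exports these types as the diff implies; store construction and output handling are elided. `filter` is now a plain list of related geonames rather than a `GeonameType`.

use suggest::{SuggestApiResult, SuggestStore};

// Hypothetical helper: exact-match a query (no prefix matching, no filter),
// then look up every name each matched geoname is known by.
fn print_geoname_matches(store: &SuggestStore, query: &str) -> SuggestApiResult<()> {
    for m in store.fetch_geonames(query, false, None)? {
        println!("matched {:?} ({:?}, prefix: {})", m.geoname, m.match_type, m.prefix);
        // All names this geoname is known by, including localized alternates.
        let _alternates = store.fetch_geoname_alternates(&m.geoname)?;
    }
    Ok(())
}
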
@@ -569,18 +559,22 @@ impl<S> SuggestStoreInner<S> {
&self, &self,
query: &str, query: &str,
match_name_prefix: bool, match_name_prefix: bool,
geoname_type: Option<GeonameType>,
filter: Option<Vec<Geoname>>, filter: Option<Vec<Geoname>>,
) -> Result<Vec<GeonameMatch>> { ) -> Result<Vec<GeonameMatch>> {
self.dbs()?.reader.read(|dao| { self.dbs()?.reader.read(|dao| {
dao.fetch_geonames( dao.fetch_geonames(
query, query,
match_name_prefix, match_name_prefix,
geoname_type,
filter.as_ref().map(|f| f.iter().collect()), filter.as_ref().map(|f| f.iter().collect()),
) )
}) })
} }
pub fn fetch_geoname_alternates(&self, geoname: &Geoname) -> Result<GeonameAlternates> {
self.dbs()?
.reader
.read(|dao| dao.fetch_geoname_alternates(geoname))
}
} }
impl<S> SuggestStoreInner<S> impl<S> SuggestStoreInner<S>
@@ -664,7 +658,7 @@ where
context: &mut MetricsContext, context: &mut MetricsContext,
) -> Result<()> { ) -> Result<()> {
for record in &changes.new { for record in &changes.new {
log::trace!("Ingesting record ID: {}", record.id.as_str()); trace!("Ingesting record ID: {}", record.id.as_str());
self.process_record(dao, record, constraints, context)?; self.process_record(dao, record, constraints, context)?;
} }
for record in &changes.updated { for record in &changes.updated {
@@ -672,20 +666,20 @@ where
// Suggestions in particular don't have a stable identifier, and // Suggestions in particular don't have a stable identifier, and
// determining which suggestions in the record actually changed is // determining which suggestions in the record actually changed is
// more complicated than dropping and re-ingesting all of them. // more complicated than dropping and re-ingesting all of them.
log::trace!("Reingesting updated record ID: {}", record.id.as_str()); trace!("Reingesting updated record ID: {}", record.id.as_str());
dao.delete_record_data(&record.id)?; dao.delete_record_data(&record.id)?;
self.process_record(dao, record, constraints, context)?; self.process_record(dao, record, constraints, context)?;
} }
for record in &changes.unchanged { for record in &changes.unchanged {
if self.should_reprocess_record(dao, record, constraints)? { if self.should_reprocess_record(dao, record, constraints)? {
log::trace!("Reingesting unchanged record ID: {}", record.id.as_str()); trace!("Reingesting unchanged record ID: {}", record.id.as_str());
self.process_record(dao, record, constraints, context)?; self.process_record(dao, record, constraints, context)?;
} else { } else {
log::trace!("Skipping unchanged record ID: {}", record.id.as_str()); trace!("Skipping unchanged record ID: {}", record.id.as_str());
} }
} }
for record in &changes.deleted { for record in &changes.deleted {
log::trace!("Deleting record ID: {:?}", record.id); trace!("Deleting record ID: {:?}", record.id);
dao.delete_record_data(&record.id)?; dao.delete_record_data(&record.id)?;
} }
dao.update_ingested_records( dao.update_ingested_records(
@@ -776,7 +770,10 @@ where
)?; )?;
} }
} }
SuggestRecord::Geonames => self.process_geoname_record(dao, record, context)?, SuggestRecord::Geonames => self.process_geonames_record(dao, record, context)?,
SuggestRecord::GeonamesAlternates => {
self.process_geonames_alternates_record(dao, record, context)?
}
} }
Ok(()) Ok(())
} }
@@ -1062,11 +1059,10 @@ pub(crate) mod tests {
&self, &self,
query: &str, query: &str,
match_name_prefix: bool, match_name_prefix: bool,
geoname_type: Option<GeonameType>,
filter: Option<Vec<Geoname>>, filter: Option<Vec<Geoname>>,
) -> Vec<GeonameMatch> { ) -> Vec<GeonameMatch> {
self.inner self.inner
.fetch_geonames(query, match_name_prefix, geoname_type, filter) .fetch_geonames(query, match_name_prefix, filter)
.expect("Error fetching geonames") .expect("Error fetching geonames")
} }
} }


@@ -5,7 +5,7 @@
use chrono::Local; use chrono::Local;
use crate::db::DEFAULT_SUGGESTION_SCORE; use crate::{db::DEFAULT_SUGGESTION_SCORE, geoname::Geoname};
/// The template parameter for a timestamp in a "raw" sponsored suggestion URL. /// The template parameter for a timestamp in a "raw" sponsored suggestion URL.
const TIMESTAMP_TEMPLATE: &str = "%YYYYMMDDHH%"; const TIMESTAMP_TEMPLATE: &str = "%YYYYMMDDHH%";
@@ -86,11 +86,7 @@ pub enum Suggestion {
score: f64, score: f64,
}, },
Weather { Weather {
city: Option<String>, city: Option<Geoname>,
region: Option<String>,
country: Option<String>,
latitude: Option<f64>,
longitude: Option<f64>,
score: f64, score: f64,
}, },
Fakespot { Fakespot {
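
The flat `region`/`country`/`latitude`/`longitude` fields collapse into the embedded `Geoname`. A sketch of the consumer-side change, assuming `Suggestion` and `Geoname` are exported from the crate root and the variant's field set matches the diff:

use suggest::{Geoname, Suggestion};

// With the new shape, location details come from the embedded `Geoname`
// rather than from flat fields on `Suggestion::Weather` itself.
fn weather_city(s: &Suggestion) -> Option<(&Geoname, f64)> {
    match s {
        Suggestion::Weather { city: Some(g), score } => Some((g, *score)),
        _ => None,
    }
}
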


@@ -156,8 +156,6 @@ impl Suggestion {
pub fn before_each() { pub fn before_each() {
static ONCE: Once = Once::new(); static ONCE: Once = Once::new();
ONCE.call_once(|| { ONCE.call_once(|| {
env_logger::Builder::from_env(env_logger::Env::default().default_filter_or("trace")) error_support::init_for_tests_with_level(error_support::Level::Trace);
.is_test(true)
.init();
}); });
} }


@@ -12,13 +12,13 @@ use std::{cmp::Ordering, collections::HashSet};
use crate::{ use crate::{
config::SuggestProviderConfig, config::SuggestProviderConfig,
db::{ db::{
KeywordInsertStatement, KeywordMetricsInsertStatement, SuggestDao, KeywordInsertStatement, KeywordsMetrics, KeywordsMetricsUpdater, SuggestDao,
SuggestionInsertStatement, DEFAULT_SUGGESTION_SCORE, SuggestionInsertStatement, DEFAULT_SUGGESTION_SCORE,
}, },
geoname::{GeonameMatch, GeonameType}, geoname::GeonameMatch,
metrics::MetricsContext, metrics::MetricsContext,
provider::SuggestionProvider, provider::SuggestionProvider,
rs::{Client, Record, SuggestRecordId}, rs::{Client, Record, SuggestRecordId, SuggestRecordType},
store::SuggestStoreInner, store::SuggestStoreInner,
suggestion::Suggestion, suggestion::Suggestion,
util::filter_map_chunks, util::filter_map_chunks,
@@ -39,14 +39,6 @@ pub(crate) struct DownloadedWeatherAttachment {
/// Score for weather suggestions. If there are multiple weather records, we /// Score for weather suggestions. If there are multiple weather records, we
/// use the `score` from the most recently ingested record. /// use the `score` from the most recently ingested record.
pub score: Option<f64>, pub score: Option<f64>,
/// The max length of all keywords in the attachment. Used for keyword
    /// metrics. We pre-compute this to avoid doing duplicate work on all users'
/// machines.
pub max_keyword_length: u32,
/// The max word count of all keywords in the attachment. Used for keyword
    /// metrics. We pre-compute this to avoid doing duplicate work on all users'
/// machines.
pub max_keyword_word_count: u32,
} }
/// This data is used to service every query handled by the weather provider, so /// This data is used to service every query handled by the weather provider, so
@@ -57,10 +49,8 @@ pub struct WeatherCache {
min_keyword_length: i32, min_keyword_length: i32,
/// Cached value of the same name from `SuggestProviderConfig::Weather`. /// Cached value of the same name from `SuggestProviderConfig::Weather`.
score: f64, score: f64,
/// Max length of all weather keywords. /// Cached weather keywords metrics.
max_keyword_length: usize, keywords_metrics: KeywordsMetrics,
/// Max word count across all weather keywords.
max_keyword_word_count: usize,
} }
impl SuggestDao<'_> { impl SuggestDao<'_> {
@@ -74,28 +64,42 @@ impl SuggestDao<'_> {
// The first step in parsing the query is lowercasing and splitting it // The first step in parsing the query is lowercasing and splitting it
// into words. We want to avoid that work for strings that are so long // into words. We want to avoid that work for strings that are so long
// they can't possibly match. The longest possible weather query is two // they can't possibly match. We'll stipulate that weather queries will
// geonames + one weather keyword + at least two spaces between those // include the following parts at most:
// three components, say, 10 extra characters total for spaces and //
// punctuation. There's no point in an analogous min length check since // * 3 geonames max: city + one admin division like a state + country
// weather suggestions can be matched on city alone and many city names // * 1 weather keyword
// are only a few characters long ("nyc"). // * 3 spaces between the previous geonames and keyword
// * 10 extra chars to allow for extra spaces and punctuation
//
// This will exclude some valid queries because the logic below allows
// for multiple weather keywords, and a city may have more than one
// admin division, but we don't expect many users to type such long
// queries.
//
// There's no point in an analogous min length check since weather
// suggestions can be matched on city alone and many city names are only
// a few characters long ("nyc").
let g_cache = self.geoname_cache(); let g_cache = self.geoname_cache();
let w_cache = self.weather_cache(); let w_cache = self.weather_cache();
let max_query_len = 2 * g_cache.max_name_length + w_cache.max_keyword_length + 10; let max_query_len =
3 * g_cache.keywords_metrics.max_len + w_cache.keywords_metrics.max_len + 10;
if max_query_len < query.keyword.len() { if max_query_len < query.keyword.len() {
return Ok(vec![]); return Ok(vec![]);
} }
let max_chunk_size = let max_chunk_size = std::cmp::max(
std::cmp::max(g_cache.max_name_word_count, w_cache.max_keyword_word_count); g_cache.keywords_metrics.max_word_count,
w_cache.keywords_metrics.max_word_count,
);
// Lowercase, strip punctuation, and split the query into words. // Lowercase, strip punctuation, and split the query into words.
let kw_lower = query.keyword.to_lowercase(); let kw_lower = query.keyword.to_lowercase();
let words: Vec<_> = kw_lower let words: Vec<_> = kw_lower
.split_whitespace() .split_whitespace()
.flat_map(|w| { .flat_map(|w| {
w.split(|c| !char::is_alphabetic(c)) w.split(|c| !char::is_alphanumeric(c))
.filter(|s| !s.is_empty()) .filter(|s| !s.is_empty())
}) })
.collect(); .collect();
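
A self-contained sketch of the two pre-parse steps just shown: the cheap length gate (three geonames plus one keyword plus a 10-character allowance for spaces and punctuation) and the lowercase/alphanumeric split. The metric values here are hypothetical.

fn main() {
    let (geoname_max_len, keyword_max_len) = (20usize, 7usize);
    let max_query_len = 3 * geoname_max_len + keyword_max_len + 10;

    let query = "Weather, New York!";
    if query.len() > max_query_len {
        return; // can't possibly match; skip the expensive parse
    }
    let lower = query.to_lowercase();
    let words: Vec<&str> = lower
        .split_whitespace()
        // Split again on non-alphanumerics so "york!" yields "york".
        .flat_map(|w| w.split(|c: char| !c.is_alphanumeric()))
        .filter(|s| !s.is_empty())
        .collect();
    assert_eq!(words, ["weather", "new", "york"]);
}
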
@@ -103,107 +107,70 @@ impl SuggestDao<'_> {
let mut matches = let mut matches =
// Step 2: Parse the query words into a list of token paths. // Step 2: Parse the query words into a list of token paths.
filter_map_chunks::<Token>(&words, max_chunk_size, |chunk, chunk_i, path| { filter_map_chunks::<Token>(&words, max_chunk_size, |chunk, chunk_i, path| {
// Match the chunk to token types that haven't already been matched // Find all token types that match the chunk.
// in this path. `all_tokens` will remain `None` until a token is
// matched.
let mut all_tokens: Option<Vec<Token>> = None; let mut all_tokens: Option<Vec<Token>> = None;
for tt in [ for tt in [
TokenType::City, TokenType::Geoname,
TokenType::Region,
TokenType::WeatherKeyword, TokenType::WeatherKeyword,
] { ] {
if !path.iter().any(|t| t.token_type() == tt) { let mut tokens = self.match_weather_tokens(tt, path, chunk, chunk_i == 0)?;
let mut tokens = self.match_weather_tokens(tt, path, chunk, chunk_i == 0)?; if !tokens.is_empty() {
if !tokens.is_empty() { let mut ts = all_tokens.take().unwrap_or_default();
let mut ts = all_tokens.take().unwrap_or_default(); ts.append(&mut tokens);
ts.append(&mut tokens); all_tokens.replace(ts);
all_tokens.replace(ts);
}
} }
} }
// If no tokens were matched, `all_tokens` will be `None`. // If no tokens were matched, `all_tokens` will be `None`.
Ok(all_tokens) Ok(all_tokens)
})? })?
.into_iter() .into_iter()
// Step 3: Map each token path to a city-region-keyword tuple (each // Step 3: Map each token path to a `TokenPath`, which is just a
// optional). Paths are vecs, so they're ordered, so we may end up // convenient representation of the path.
// with duplicate tuples after this step. e.g., the paths .map(TokenPath::from)
// `[<Waterloo IA>, <IA>]` and `[<IA>, <Waterloo IA>]` map to the // Step 4: Filter in paths with the right combination of tokens.
// same `(<Waterloo IA>, <IA>, None)` tuple. // Along with step 2, this is the core of the matching logic.
.map(|path| { .filter(|tp| {
path.into_iter() if let Some(cm) = &tp.city_match {
.fold((None, None, None), |mut match_tuple, token| { // city name typed in full ("new york")
match token { (cm.match_type.is_name() && !cm.prefix)
Token::City(c) => { // city abbreviation typed in full + another related
match_tuple.0 = Some(c); // geoname typed in full ("ny new york")
} || (cm.match_type.is_abbreviation()
Token::Region(r) => { && !cm.prefix
match_tuple.1 = Some(r); && tp.any_other_geoname_typed_in_full)
} // any kind of city + weather keyword ("ny weather",
Token::WeatherKeyword(kw) => { // "weather new y")
match_tuple.2 = Some(kw); || tp.keyword_match
} .as_ref()
} .map(|kwm| kwm.is_min_keyword_length).unwrap_or(false)
match_tuple } else {
}) // weather keyword by itself ("weather")
}) tp.keyword_match.is_some() && !tp.any_other_geoname_matched
// Step 4: Discard tuples that don't have the right combination of
// tokens or that are otherwise invalid. Along with step 2, this is
// the core of the matching logic. In general, allow a tuple if it
// has (a) a city name typed in full or (b) a weather keyword at
// least as long as the config's min keyword length, since that
// indicates a weather intent.
.filter(|(city_match, region_match, kw_match)| {
match (city_match, region_match, kw_match) {
(None, None, Some(_)) => true,
(None, _, None) | (None, Some(_), Some(_)) => false,
(Some(city), region, kw) => {
(city.match_type.is_name() && !city.prefix)
// Allow city abbreviations without a weather
// keyword but only if the region was typed in full.
|| (city.match_type.is_abbreviation()
&& !city.prefix
&& region.as_ref().map(|r| !r.prefix).unwrap_or(false))
|| kw.as_ref().map(|k| k.is_min_keyword_length).unwrap_or(false)
}
} }
}) })
// Step 5: Map each tuple to a city-region tuple: Convert geoname // Step 5: Map each path to its city, an `Option<Geoname>`. Paths
// matches to their `Geoname` values and discard keywords. // without cities will end up as `None` values.
// Discarding keywords is important because we'll collect the tuples .map(|tp| tp.city_match.map(|cm| cm.geoname))
// in a set in the next step in order to dedupe city-regions. // Step 6: Dedupe. We'll end up with an `Option<Geoname>` for each
.map(|(city, region, _)| { // unique matching city + one `None` value if any keywords by
(city.map(|c| c.geoname), region.map(|r| r.geoname)) // themselves were matched.
})
// Step 6: Dedupe the city-regions by collecting them in a set.
.collect::<HashSet<_>>() .collect::<HashSet<_>>()
.into_iter() .into_iter()
.collect::<Vec<_>>(); .collect::<Vec<_>>();
// Sort the matches so cities with larger populations are first. // Sort the matches so cities with larger populations are first.
matches.sort_by( matches.sort_by(|city1, city2| match (&city1, &city2) {
|(city1, region1), (city2, region2)| match (&city1, &city2) { (Some(_), None) => Ordering::Less,
(Some(_), None) => Ordering::Less, (None, Some(_)) => Ordering::Greater,
(None, Some(_)) => Ordering::Greater, (Some(c1), Some(c2)) => c2.population.cmp(&c1.population),
(Some(c1), Some(c2)) => c2.population.cmp(&c1.population), (None, None) => Ordering::Equal,
(None, None) => match (&region1, &region2) { });
(Some(_), None) => Ordering::Less,
(None, Some(_)) => Ordering::Greater,
(Some(r1), Some(r2)) => r2.population.cmp(&r1.population),
(None, None) => Ordering::Equal,
},
},
);
// Finally, map matches to suggestions. // Finally, map matches to suggestions.
Ok(matches Ok(matches
.iter() .into_iter()
.map(|(city, _)| Suggestion::Weather { .map(|city| Suggestion::Weather {
city: city.as_ref().map(|c| c.name.clone()), city,
region: city.as_ref().map(|c| c.admin1_code.clone()),
country: city.as_ref().map(|c| c.country_code.clone()),
latitude: city.as_ref().map(|c| c.latitude),
longitude: city.as_ref().map(|c| c.longitude),
score: w_cache.score, score: w_cache.score,
}) })
.collect()) .collect())
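
A reduced sketch of steps 5-6 and the final sort: dedupe the `Option<city>` matches through a `HashSet`, then order cities by population descending with the city-less (keyword-only) match last. Names and populations are hypothetical.

use std::collections::HashSet;

#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct City {
    name: &'static str,
    population: u64,
}

fn main() {
    let raw = vec![
        Some(City { name: "Waterloo, IA", population: 67_000 }),
        Some(City { name: "Waterloo, ON", population: 104_000 }),
        Some(City { name: "Waterloo, IA", population: 67_000 }), // duplicate path
        None, // keyword-only match
    ];
    let mut matches: Vec<_> =
        raw.into_iter().collect::<HashSet<_>>().into_iter().collect();
    matches.sort_by(|a, b| match (a, b) {
        (Some(_), None) => std::cmp::Ordering::Less,
        (None, Some(_)) => std::cmp::Ordering::Greater,
        (Some(c1), Some(c2)) => c2.population.cmp(&c1.population),
        (None, None) => std::cmp::Ordering::Equal,
    });
    assert_eq!(matches.len(), 3);
    assert_eq!(matches[0].as_ref().map(|c| c.name), Some("Waterloo, ON"));
    assert_eq!(matches[2], None);
}
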
@@ -217,48 +184,25 @@ impl SuggestDao<'_> {
is_first_chunk: bool, is_first_chunk: bool,
) -> Result<Vec<Token>> { ) -> Result<Vec<Token>> {
match token_type { match token_type {
TokenType::City => { TokenType::Geoname => {
// Fetch matching cities, and filter them to regions we've // Fetch matching geonames, and filter them to geonames we've
// already matched in this path. // already matched in this path.
let regions: Vec<_> = path let geonames_in_path: Vec<_> = path
.iter() .iter()
.filter_map(|t| t.region().map(|m| &m.geoname)) .filter_map(|t| t.geoname_match().map(|gm| &gm.geoname))
.collect(); .collect();
Ok(self Ok(self
.fetch_geonames( .fetch_geonames(
candidate, candidate,
!is_first_chunk, !is_first_chunk,
Some(GeonameType::City), if geonames_in_path.is_empty() {
if regions.is_empty() {
None None
} else { } else {
Some(regions) Some(geonames_in_path)
}, },
)? )?
.into_iter() .into_iter()
.map(Token::City) .map(Token::Geoname)
.collect())
}
TokenType::Region => {
// Fetch matching regions, and filter them to cities we've
                // already matched in this path.
let cities: Vec<_> = path
.iter()
.filter_map(|t| t.city().map(|m| &m.geoname))
.collect();
Ok(self
.fetch_geonames(
candidate,
!is_first_chunk,
Some(GeonameType::Region),
if cities.is_empty() {
None
} else {
Some(cities)
},
)?
.into_iter()
.map(Token::Region)
.collect()) .collect())
} }
TokenType::WeatherKeyword => { TokenType::WeatherKeyword => {
@@ -328,9 +272,8 @@ impl SuggestDao<'_> {
self.scope.err_if_interrupted()?; self.scope.err_if_interrupted()?;
let mut suggestion_insert = SuggestionInsertStatement::new(self.conn)?; let mut suggestion_insert = SuggestionInsertStatement::new(self.conn)?;
let mut keyword_insert = KeywordInsertStatement::new(self.conn)?; let mut keyword_insert = KeywordInsertStatement::new(self.conn)?;
let mut keyword_metrics_insert = KeywordMetricsInsertStatement::new(self.conn)?; let mut metrics_updater = KeywordsMetricsUpdater::new();
let mut max_len = 0;
let mut max_word_count = 0;
for attach in attachments { for attach in attachments {
let suggestion_id = suggestion_insert.execute( let suggestion_id = suggestion_insert.execute(
record_id, record_id,
@@ -341,49 +284,29 @@ impl SuggestDao<'_> {
)?; )?;
for (i, keyword) in attach.keywords.iter().enumerate() { for (i, keyword) in attach.keywords.iter().enumerate() {
keyword_insert.execute(suggestion_id, keyword, None, i)?; keyword_insert.execute(suggestion_id, keyword, None, i)?;
metrics_updater.update(keyword);
} }
self.put_provider_config(SuggestionProvider::Weather, &attach.into())?; self.put_provider_config(SuggestionProvider::Weather, &attach.into())?;
max_len = std::cmp::max(max_len, attach.max_keyword_length as usize);
max_word_count = std::cmp::max(max_word_count, attach.max_keyword_word_count as usize);
} }
// Update keyword metrics. metrics_updater.finish(
keyword_metrics_insert.execute( self.conn,
record_id, record_id,
SuggestionProvider::Weather, SuggestRecordType::Weather,
max_len, &mut self.weather_cache,
max_word_count,
)?; )?;
// We just made some insertions that might invalidate the data in the
// cache. Clear it so it's repopulated the next time it's accessed.
self.weather_cache.take();
Ok(()) Ok(())
} }
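
`KeywordsMetricsUpdater` replaces the precomputed attachment fields: metrics are now derived from the keywords themselves at ingest time. A stand-alone sketch of just the accumulation step (the real updater in `db.rs` also persists to `keywords_metrics` and invalidates the cache):

#[derive(Default)]
struct MetricsUpdater {
    max_len: usize,
    max_word_count: usize,
}

impl MetricsUpdater {
    fn update(&mut self, keyword: &str) {
        // Fold per-keyword maxima as each keyword is inserted.
        self.max_len = self.max_len.max(keyword.len());
        self.max_word_count = self.max_word_count.max(keyword.split_whitespace().count());
    }
}

fn main() {
    let mut u = MetricsUpdater::default();
    for kw in ["weather", "weather near me"] {
        u.update(kw);
    }
    assert_eq!((u.max_len, u.max_word_count), (15, 3));
}
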
fn weather_cache(&self) -> &WeatherCache { fn weather_cache(&self) -> &WeatherCache {
self.weather_cache.get_or_init(|| { self.weather_cache.get_or_init(|| {
let mut cache = WeatherCache::default(); let mut cache = WeatherCache {
keywords_metrics: self
// keyword metrics .get_keywords_metrics(SuggestRecordType::Weather)
if let Ok((len, word_count)) = self.conn.query_row_and_then( .unwrap_or_default(),
r#" ..WeatherCache::default()
SELECT };
max(max_length) AS len, max(max_word_count) AS word_count
FROM
keywords_metrics
WHERE
provider = :provider
"#,
named_params! {
":provider": SuggestionProvider::Weather
},
|row| -> Result<(usize, usize)> { Ok((row.get("len")?, row.get("word_count")?)) },
) {
cache.max_keyword_length = len;
cache.max_keyword_word_count = word_count;
}
// provider config // provider config
if let Ok(Some(SuggestProviderConfig::Weather { if let Ok(Some(SuggestProviderConfig::Weather {
@@ -428,40 +351,24 @@ impl From<&DownloadedWeatherAttachment> for SuggestProviderConfig {
#[derive(Clone, Debug, Eq, Hash, PartialEq)] #[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum TokenType { enum TokenType {
City, Geoname,
Region,
WeatherKeyword, WeatherKeyword,
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
#[allow(clippy::large_enum_variant)]
enum Token { enum Token {
City(GeonameMatch), Geoname(GeonameMatch),
Region(GeonameMatch),
WeatherKeyword(WeatherKeywordMatch), WeatherKeyword(WeatherKeywordMatch),
} }
impl Token { impl Token {
fn city(&self) -> Option<&GeonameMatch> { fn geoname_match(&self) -> Option<&GeonameMatch> {
match self { match self {
Self::City(g) => Some(g), Self::Geoname(gm) => Some(gm),
_ => None, _ => None,
} }
} }
fn region(&self) -> Option<&GeonameMatch> {
match self {
Self::Region(g) => Some(g),
_ => None,
}
}
fn token_type(&self) -> TokenType {
match self {
Self::City(_) => TokenType::City,
Self::Region(_) => TokenType::Region,
Self::WeatherKeyword(_) => TokenType::WeatherKeyword,
}
}
} }
#[derive(Clone, Debug, Default, Eq, Hash, PartialEq)] #[derive(Clone, Debug, Default, Eq, Hash, PartialEq)]
@@ -470,6 +377,38 @@ struct WeatherKeywordMatch {
is_min_keyword_length: bool, is_min_keyword_length: bool,
} }
#[derive(Default)]
struct TokenPath {
keyword_match: Option<WeatherKeywordMatch>,
city_match: Option<GeonameMatch>,
any_other_geoname_matched: bool,
any_other_geoname_typed_in_full: bool,
}
impl From<Vec<Token>> for TokenPath {
fn from(tokens: Vec<Token>) -> Self {
let mut tp = Self::default();
for t in tokens {
match t {
Token::WeatherKeyword(kwm) => {
tp.keyword_match = Some(kwm);
}
Token::Geoname(gm) => {
if gm.geoname.feature_class == "P" {
tp.city_match = Some(gm);
} else {
tp.any_other_geoname_matched = true;
if !gm.prefix {
tp.any_other_geoname_typed_in_full = true;
}
}
}
}
}
tp
}
}
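
A reduced sketch of the step-4 acceptance rules that `TokenPath` feeds, with the `GeonameMatch` details collapsed to plain booleans. This is illustrative only; field names are hypothetical.

// Mirrors the filter above: a path with a city needs the city name typed in
// full, or a full abbreviation plus another full geoname, or a long-enough
// weather keyword; a city-less path needs a keyword and no other geonames.
#[derive(Default)]
struct Path {
    has_city: bool,
    city_name_full: bool,
    city_abbreviation_full: bool,
    has_keyword: bool,
    keyword_min_len: bool,
    other_geoname_matched: bool,
    other_geoname_full: bool,
}

fn accept(p: &Path) -> bool {
    if p.has_city {
        p.city_name_full
            || (p.city_abbreviation_full && p.other_geoname_full)
            || p.keyword_min_len
    } else {
        p.has_keyword && !p.other_geoname_matched
    }
}

fn main() {
    // "new york": city typed in full, nothing else needed.
    assert!(accept(&Path { has_city: true, city_name_full: true, ..Default::default() }));
    // "ny new york": abbreviation + a related geoname typed in full.
    assert!(accept(&Path {
        has_city: true,
        city_abbreviation_full: true,
        other_geoname_matched: true,
        other_geoname_full: true,
        ..Default::default()
    }));
    // "weather": keyword alone, no geonames.
    assert!(accept(&Path { has_keyword: true, keyword_min_len: true, ..Default::default() }));
    // "ny": a bare abbreviation is not enough.
    assert!(!accept(&Path { has_city: true, city_abbreviation_full: true, ..Default::default() }));
}
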
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -480,11 +419,7 @@ mod tests {
impl From<Geoname> for Suggestion { impl From<Geoname> for Suggestion {
fn from(g: Geoname) -> Self { fn from(g: Geoname) -> Self {
Suggestion::Weather { Suggestion::Weather {
city: Some(g.name), city: Some(g),
region: Some(g.admin1_code),
country: Some(g.country_code),
latitude: Some(g.latitude),
longitude: Some(g.longitude),
score: 0.24, score: 0.24,
} }
} }
@@ -499,8 +434,6 @@ mod tests {
json!({ json!({
"min_keyword_length": 3, "min_keyword_length": 3,
"keywords": ["ab", "xyz", "weather"], "keywords": ["ab", "xyz", "weather"],
"max_keyword_length": "weather".len(),
"max_keyword_word_count": 1,
"score": 0.24 "score": 0.24
}), }),
), ),
@@ -530,8 +463,6 @@ mod tests {
// min_keyword_length > 0 means prefixes are allowed. // min_keyword_length > 0 means prefixes are allowed.
"min_keyword_length": 5, "min_keyword_length": 5,
"keywords": ["ab", "xyz", "cdefg", "weather"], "keywords": ["ab", "xyz", "cdefg", "weather"],
"max_keyword_length": "weather".len(),
"max_keyword_word_count": 1,
"score": 0.24 "score": 0.24
}), }),
), ),
@@ -551,13 +482,11 @@ mod tests {
"xcdefg", "xcdefg",
"cdefgx", "cdefgx",
"x cdefg", "x cdefg",
"cdefg x",
"weatherx", "weatherx",
"xweather", "xweather",
"xweat", "xweat",
"weatx", "weatx",
"x weather", "x weather",
" weather x",
"weather foo", "weather foo",
"foo weather", "foo weather",
// too short // too short
@@ -574,11 +503,15 @@ mod tests {
let matches = [ let matches = [
"cdefg", "cdefg",
// full keyword ("cdefg") + prefix of another keyword ("xyz")
"cdefg x",
"weath", "weath",
"weathe", "weathe",
"weather", "weather",
"WeAtHeR", "WeAtHeR",
" weather ", " weather ",
// full keyword ("weather") + prefix of another keyword ("xyz")
" weather x",
]; ];
for q in matches { for q in matches {
assert_eq!( assert_eq!(
@@ -586,11 +519,7 @@ mod tests {
vec![Suggestion::Weather { vec![Suggestion::Weather {
score: 0.24, score: 0.24,
city: None, city: None,
region: None, }]
country: None,
latitude: None,
longitude: None,
},]
); );
} }
@@ -608,8 +537,6 @@ mod tests {
// min_keyword_length == 0 means prefixes are not allowed. // min_keyword_length == 0 means prefixes are not allowed.
"min_keyword_length": 0, "min_keyword_length": 0,
"keywords": ["weather"], "keywords": ["weather"],
"max_keyword_length": "weather".len(),
"max_keyword_word_count": 1,
"score": 0.24 "score": 0.24
}), }),
), ),
@@ -632,11 +559,7 @@ mod tests {
vec![Suggestion::Weather { vec![Suggestion::Weather {
score: 0.24, score: 0.24,
city: None, city: None,
region: None, }]
country: None,
latitude: None,
longitude: None,
},]
); );
} }
@@ -659,8 +582,6 @@ mod tests {
// not two. // not two.
"keywords": ["ab", "xyz", "weather", "weather near me"], "keywords": ["ab", "xyz", "weather", "weather near me"],
"min_keyword_length": 5, "min_keyword_length": 5,
"max_keyword_length": "weather".len(),
"max_keyword_word_count": 1,
"score": 0.24 "score": 0.24
}), }),
)); ));
@@ -706,8 +627,15 @@ mod tests {
), ),
( (
"weather a", "weather a",
// The made-up long-name city starts with A. vec![
vec![geoname::tests::long_name_city().into()], // A suggestion without a city is returned because the query
// also matches a keyword ("weather") + a prefix of another
// keyword ("ab").
Suggestion::Weather {
score: 0.24,
city: None,
},
],
), ),
( (
"weather ac", "weather ac",
@@ -912,8 +840,8 @@ mod tests {
( (
"waterloo", "waterloo",
vec![ vec![
// Waterloo, IA should be first since its population is // Matches should be returned by population descending.
// larger than Waterloo, AL. geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(), geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(), geoname::tests::waterloo_al().into(),
], ],
@@ -980,7 +908,10 @@ mod tests {
"ny ny", "ny ny",
vec![geoname::tests::nyc().into()], vec![geoname::tests::nyc().into()],
), ),
("ny ny ny", vec![]), (
"ny ny ny",
vec![geoname::tests::nyc().into()],
),
( (
"ny n", "ny n",
vec![], vec![],
@@ -1121,6 +1052,7 @@ mod tests {
( (
"weather water", "weather water",
vec![ vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(), geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(), geoname::tests::waterloo_al().into(),
], ],
@@ -1128,21 +1060,95 @@ mod tests {
( (
"waterloo w", "waterloo w",
vec![ vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(), geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(), geoname::tests::waterloo_al().into(),
], ],
), ),
("weather w w", vec![]), (
("weather w water", vec![]), // "w" matches "waco", "waterloo", and "weather"
("weather w waterloo", vec![]), "weather w w",
("weather water w", vec![]), vec![
("weather waterloo water", vec![]), geoname::tests::waco().into(),
("weather water water", vec![]), geoname::tests::waterloo_on().into(),
("weather water waterloo", vec![]), geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
Suggestion::Weather {
score: 0.24,
city: None,
},
],
),
("weather w water", vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
]),
("weather w waterloo", vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
]),
("weather water w", vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
]),
("weather waterloo water", vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
]),
("weather water water", vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
]),
("weather water waterloo", vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(),
]),
("waterloo foo", vec![]), ("waterloo foo", vec![]),
("waterloo weather foo", vec![]), ("waterloo weather foo", vec![]),
("foo waterloo", vec![]), ("foo waterloo", vec![]),
("foo waterloo weather", vec![]), ("foo waterloo weather", vec![]),
("weather waterloo foo", vec![]),
("weather foo waterloo", vec![]),
("weather water foo", vec![]),
("weather foo water", vec![]),
(
"waterloo on",
vec![geoname::tests::waterloo_on().into()],
),
(
"waterloo ont",
vec![geoname::tests::waterloo_on().into()],
),
(
"waterloo ont.",
vec![geoname::tests::waterloo_on().into()],
),
(
"waterloo ontario",
vec![geoname::tests::waterloo_on().into()],
),
(
"waterloo canada",
vec![geoname::tests::waterloo_on().into()],
),
(
"waterloo on canada",
vec![geoname::tests::waterloo_on().into()],
),
(
"waterloo on us",
vec![],
),
(
"waterloo al canada",
vec![],
),
( (
"ny", "ny",
vec![], vec![],
@@ -1187,13 +1193,38 @@ mod tests {
"weather roc", "weather roc",
vec![geoname::tests::rochester().into()], vec![geoname::tests::rochester().into()],
), ),
(
"liverpool",
vec![geoname::tests::liverpool_city().into()],
),
(
"liverpool eng",
vec![geoname::tests::liverpool_city().into()],
),
(
"liverpool england",
vec![geoname::tests::liverpool_city().into()],
),
(
"liverpool uk",
vec![geoname::tests::liverpool_city().into()],
),
(
"liverpool england uk",
vec![geoname::tests::liverpool_city().into()],
),
( (
geoname::tests::LONG_NAME, geoname::tests::LONG_NAME,
vec![geoname::tests::long_name_city().into()], vec![geoname::tests::long_name_city().into()],
), ),
(
" waterloo iowa",
vec![geoname::tests::waterloo_ia().into()],
),
( (
" WaTeRlOo ", " WaTeRlOo ",
vec![ vec![
geoname::tests::waterloo_on().into(),
geoname::tests::waterloo_ia().into(), geoname::tests::waterloo_ia().into(),
geoname::tests::waterloo_al().into(), geoname::tests::waterloo_al().into(),
], ],
@@ -1487,21 +1518,21 @@ mod tests {
.with_record(SuggestionProvider::Weather.record( .with_record(SuggestionProvider::Weather.record(
"weather-0", "weather-0",
json!({ json!({
"max_keyword_length": 10,
"max_keyword_word_count": 5,
"min_keyword_length": 3, "min_keyword_length": 3,
"score": 0.24, "score": 0.24,
"keywords": [] "keywords": [
"a b c d ef"
],
}), }),
)) ))
.with_record(SuggestionProvider::Weather.record( .with_record(SuggestionProvider::Weather.record(
"weather-1", "weather-1",
json!({ json!({
"max_keyword_length": 20,
"max_keyword_word_count": 2,
"min_keyword_length": 3, "min_keyword_length": 3,
"score": 0.24, "score": 0.24,
"keywords": [] "keywords": [
"abcdefghik lmnopqrst"
],
}), }),
)), )),
); );
@@ -1513,8 +1544,8 @@ mod tests {
store.read(|dao| { store.read(|dao| {
let cache = dao.weather_cache(); let cache = dao.weather_cache();
assert_eq!(cache.max_keyword_length, 20); assert_eq!(cache.keywords_metrics.max_len, 20);
assert_eq!(cache.max_keyword_word_count, 5); assert_eq!(cache.keywords_metrics.max_word_count, 5);
Ok(()) Ok(())
})?; })?;
@@ -1528,8 +1559,8 @@ mod tests {
}); });
store.read(|dao| { store.read(|dao| {
let cache = dao.weather_cache(); let cache = dao.weather_cache();
assert_eq!(cache.max_keyword_length, 20); assert_eq!(cache.keywords_metrics.max_len, 20);
assert_eq!(cache.max_keyword_word_count, 2); assert_eq!(cache.keywords_metrics.max_word_count, 2);
Ok(()) Ok(())
})?; })?;
@@ -1539,11 +1570,11 @@ mod tests {
.add_record(SuggestionProvider::Weather.record( .add_record(SuggestionProvider::Weather.record(
"weather-3", "weather-3",
json!({ json!({
"max_keyword_length": 15,
"max_keyword_word_count": 3,
"min_keyword_length": 3, "min_keyword_length": 3,
"score": 0.24, "score": 0.24,
"keywords": [] "keywords": [
"abcde fghij klmno"
]
}), }),
)); ));
store.ingest(SuggestIngestionConstraints { store.ingest(SuggestIngestionConstraints {
@@ -1552,8 +1583,8 @@ mod tests {
}); });
store.read(|dao| { store.read(|dao| {
let cache = dao.weather_cache(); let cache = dao.weather_cache();
assert_eq!(cache.max_keyword_length, 20); assert_eq!(cache.keywords_metrics.max_len, 20);
assert_eq!(cache.max_keyword_word_count, 3); assert_eq!(cache.keywords_metrics.max_word_count, 3);
Ok(()) Ok(())
})?; })?;


@@ -1 +1 @@
{"files":{"Cargo.toml":"c8f39fc4260580442b66db092b699c11c8a6398bd38a9354d5c996de5cad4ae7","README.md":"6d4ff5b079ac5340d18fa127f583e7ad793c5a2328b8ecd12c3fc723939804f2","build.rs":"aa971160d67ce8626b26e15c04c34b730f594c45c817aae34cfc9f3ea14ae284","src/bso/content.rs":"92935258745bdf0c3915a555cb6884a7fa69faa1290ec2c1815f6e2f3c0f0562","src/bso/crypto.rs":"27602dcccb37d3a55620ee4e16b705da455d49af575de115c7c79c0178eb1d6d","src/bso/mod.rs":"2da81e81940efe3ad663925a25c03d0ea40584d70e25143dacdba6a1a73e31a9","src/bso/test_utils.rs":"4ec5a2df5e1c0ec14dc770681e959bdcef6ef04f6fde435999197f46a8ae4831","src/client/coll_state.rs":"cad3d1052f7bfe465844bd4125482e3363593f804e583984a1d412b448da1cbf","src/client/coll_update.rs":"dac04a90c29dd969f8b4250414609c9b6d61daf2dfa4ae77d1c4a165ba970b05","src/client/collection_keys.rs":"c27b2277a3a52033b58ab01490fc2ea7007494195dd5e6dc2c6931a4ca96795a","src/client/mod.rs":"8f588d4a035cf79d96f2500f06d5651c1a7c566127c456ffa5429811ddce3fd6","src/client/request.rs":"e878c5b43298b6eb682748474963f9fb8d053b4dc690bbb27107f5fa0ee74e01","src/client/state.rs":"2eaa44fc96e3b4b094380984ce1e2bb11bd2705b061d6cb5be557071aeebd1d5","src/client/status.rs":"f445a8765dac9789444e23b5145148413407bb1d18a15ef56682243997f591bf","src/client/storage_client.rs":"45955046f582db319dcee378f70582a89dfd665ac831456a7243acab7bb0b11b","src/client/sync.rs":"b29abb512ec9d163f7883b71f78c9202802dcb17cad1fc5dc08087fb0bb66704","src/client/sync_multiple.rs":"67a0e6b9049e5b1b1b248febe392b53eb54bb77e9ddddfba62da975389adf3aa","src/client/token.rs":"682863b3eae3aa377397ce052ced7f07d83426072ab311a8851dae9dc8966f68","src/client/util.rs":"71cc70ee41f821f53078675e636e9fad9c6046fa1a989e37f5487e340a2277d6","src/client_types.rs":"3c3cac1540b92482f43660d9e43bdde8481c4cc1a98253a68c80e791231f5976","src/clients_engine/engine.rs":"31c0b6934152f3921af83dadf5d2b22205f49a501427cd736c62f782595cece3","src/clients_engine/mod.rs":"461729e6f89b66b2cbd89b041a03d4d6a8ba582284ed4f3015cb13e1a0c6da97","src/clients_engine/record.rs":"1721e8873da96a019986bbae9df1559c49bf40a16c8cd478904f367652b1e449","src/clients_engine/ser.rs":"be6a19c45eb8002ff8e7cf746d2f97d9cecd1740f9817a8f1d624825475fd777","src/device_type.rs":"dc2d4296d25e31471c8e68488f1043ff239b902036cd6aea8a686cf79b4ed335","src/enc_payload.rs":"aa3eea7df49b24cd59831680a47c417b73a3e36e6b0f3f4baf14ca66bd68be6b","src/engine/bridged_engine.rs":"b4e3071a0259ac55303364e57f9cd685916b80dc302030bba07790e55ceecb66","src/engine/mod.rs":"d0d031d80fbdd90686c443b8c44720ab2ab0aff2c1106e0fdd7d60c46361fe8b","src/engine/request.rs":"5923025fb9550178339f880a1bf8526d8e853e7a0b2bce6d9d687cc808ac0085","src/engine/sync_engine.rs":"531b35d72ce9e04c3e543c0468c1e450fba2c0dc3d33d68d9b1c0a5c1ad7dd34","src/error.rs":"a45cfe02e6301f473c34678b694943c1a04308b8c292c6e0448bf495194c3b5e","src/key_bundle.rs":"abd0781f3be8c8e7c691f18bb71f3433b633803c48da9794e15ac6301ed60d6c","src/lib.rs":"f59f8817978d943518dfa03ab31fc0f6b1fc72ee9943a97aef1537e2769649f5","src/record_types.rs":"02bb3d352fb808131d298f9b90d9c95b7e9e0138b97c5401f3b9fdacc5562f44","src/server_timestamp.rs":"63916817796e83fe31fbd598bac025dfa71ec9e1808d09073db258c78a3331cd","src/sync15.udl":"464047a67a7877bc671f9f3aca13f3039cf34beb51756bcdb86015d789a8f400","src/telemetry.rs":"f332b3849824db6b131a7c2dfe20f56075c6a66ad72f6697bc283d914126b423","uniffi.toml":"d9a5a5cb0eee5218f5eee4d8d89214cc1d7fb5b49323fd17becdf4adb706a6aa"},"package":null} 
{"files":{"Cargo.toml":"a244821aa5f4d338564b745368788a19401f1ce78da0d6cefeac0e16f776f8d4","README.md":"6d4ff5b079ac5340d18fa127f583e7ad793c5a2328b8ecd12c3fc723939804f2","build.rs":"aa971160d67ce8626b26e15c04c34b730f594c45c817aae34cfc9f3ea14ae284","src/bso/content.rs":"c34a689d5d910bc612c5ebfe34db00d5bf6710bfb657d6770e63e62c3cfedbe4","src/bso/crypto.rs":"27602dcccb37d3a55620ee4e16b705da455d49af575de115c7c79c0178eb1d6d","src/bso/mod.rs":"2da81e81940efe3ad663925a25c03d0ea40584d70e25143dacdba6a1a73e31a9","src/bso/test_utils.rs":"4ec5a2df5e1c0ec14dc770681e959bdcef6ef04f6fde435999197f46a8ae4831","src/client/coll_state.rs":"db1b5a3d2a274698218b18e9c7552bf2868b8a860815ac677b91ecc4f3ea8afc","src/client/coll_update.rs":"dac04a90c29dd969f8b4250414609c9b6d61daf2dfa4ae77d1c4a165ba970b05","src/client/collection_keys.rs":"c27b2277a3a52033b58ab01490fc2ea7007494195dd5e6dc2c6931a4ca96795a","src/client/mod.rs":"8f588d4a035cf79d96f2500f06d5651c1a7c566127c456ffa5429811ddce3fd6","src/client/request.rs":"2b5d2c77279d4d922b845020ffa0c6f34f84400fffff772cd4ab3a79f23e45a0","src/client/state.rs":"7efbff1fb0b545d45e2c5b1b4b0f42748eaec91b51fb4c41cbc31cce67903f6c","src/client/status.rs":"f445a8765dac9789444e23b5145148413407bb1d18a15ef56682243997f591bf","src/client/storage_client.rs":"ca74d26cf177a16697627d919add5b7e33f22f5e7357a5c5c546edb6687dd8f8","src/client/sync.rs":"ffcca4a2417ce4a6404363f3c1446def32f1297c14c27750ed69e6c03e23b9c1","src/client/sync_multiple.rs":"6d91cae760d7553f19443df9dc38cbdfb66b996a9a7b3d7a15f4179233c7dff9","src/client/token.rs":"ddfeb461f1be1b775a185c9b0cb372744032cf62ddb30666c672e73f761fab5d","src/client/util.rs":"71cc70ee41f821f53078675e636e9fad9c6046fa1a989e37f5487e340a2277d6","src/client_types.rs":"3c3cac1540b92482f43660d9e43bdde8481c4cc1a98253a68c80e791231f5976","src/clients_engine/engine.rs":"b6556f16eaa83e922686d838fc8ec53498da274445762250c10b69d268f20eda","src/clients_engine/mod.rs":"461729e6f89b66b2cbd89b041a03d4d6a8ba582284ed4f3015cb13e1a0c6da97","src/clients_engine/record.rs":"736870c81dff89a05c0703eef52beda83f45fe9f8d989dfb6e13b88781f1198f","src/clients_engine/ser.rs":"be6a19c45eb8002ff8e7cf746d2f97d9cecd1740f9817a8f1d624825475fd777","src/device_type.rs":"dc2d4296d25e31471c8e68488f1043ff239b902036cd6aea8a686cf79b4ed335","src/enc_payload.rs":"aa3eea7df49b24cd59831680a47c417b73a3e36e6b0f3f4baf14ca66bd68be6b","src/engine/bridged_engine.rs":"00220c64abc42e5c37e87efd6dbfe5c4c48507b9960e9ba722ac93e038ccfd30","src/engine/mod.rs":"d0d031d80fbdd90686c443b8c44720ab2ab0aff2c1106e0fdd7d60c46361fe8b","src/engine/request.rs":"5923025fb9550178339f880a1bf8526d8e853e7a0b2bce6d9d687cc808ac0085","src/engine/sync_engine.rs":"531b35d72ce9e04c3e543c0468c1e450fba2c0dc3d33d68d9b1c0a5c1ad7dd34","src/error.rs":"1a2164a6ee6376feabcc02ff66cc0841aca70e436cce69eb0ea893b020225217","src/key_bundle.rs":"a68b484ec8702a269645e892aa0b3161943388dac6b160e87393617139fad22a","src/lib.rs":"f59f8817978d943518dfa03ab31fc0f6b1fc72ee9943a97aef1537e2769649f5","src/record_types.rs":"02bb3d352fb808131d298f9b90d9c95b7e9e0138b97c5401f3b9fdacc5562f44","src/server_timestamp.rs":"63916817796e83fe31fbd598bac025dfa71ec9e1808d09073db258c78a3331cd","src/sync15.udl":"464047a67a7877bc671f9f3aca13f3039cf34beb51756bcdb86015d789a8f400","src/telemetry.rs":"7261f0241587c8533239b6f19345d5849f6242bab62fe46e9dab5b57614a52db","uniffi.toml":"d9a5a5cb0eee5218f5eee4d8d89214cc1d7fb5b49323fd17becdf4adb706a6aa"},"package":null}


@@ -51,7 +51,6 @@ path = "src/lib.rs"
[dependencies] [dependencies]
anyhow = "1.0" anyhow = "1.0"
lazy_static = "1.4" lazy_static = "1.4"
log = "0.4"
serde_derive = "1" serde_derive = "1"
serde_json = "1" serde_json = "1"
serde_path_to_error = "0.1" serde_path_to_error = "0.1"
@@ -98,9 +97,9 @@ optional = true
path = "../viaduct" path = "../viaduct"
optional = true optional = true
[dev-dependencies.env_logger] [dev-dependencies.error-support]
version = "0.10" path = "../support/error"
default-features = false features = ["testing"]
[dev-dependencies.nss] [dev-dependencies.nss]
path = "../support/rc_crypto/nss" path = "../support/rc_crypto/nss"


@@ -10,6 +10,7 @@
//! * Turn arbitrary <T> objects with an `id` field into an OutgoingBso. //! * Turn arbitrary <T> objects with an `id` field into an OutgoingBso.
use super::{IncomingBso, IncomingContent, IncomingKind, OutgoingBso, OutgoingEnvelope}; use super::{IncomingBso, IncomingContent, IncomingKind, OutgoingBso, OutgoingEnvelope};
use crate::error::{trace, warn};
use crate::Guid; use crate::Guid;
use error_support::report_error; use error_support::report_error;
use serde::Serialize; use serde::Serialize;
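
The bare `trace!`/`warn!` names now come from `crate::error` re-exports rather than from `log` directly. One plausible shape for that module, sketched as an assumption (the real `error_support` macros may layer breadcrumbs or error reporting on top):

pub(crate) mod error {
    // Re-export `log`'s macros under a crate-local path so call sites can
    // write bare `trace!(...)` and the crate can later swap in richer
    // behavior without touching every module.
    pub(crate) use log::{trace, warn};
}

use error::{trace, warn};

fn main() {
    env_logger::init(); // assumes env_logger, as in the old test setup
    trace!("Ingesting record ID: {}", "example-id");
    warn!("Invalid incoming cleartext {}: {}", "guid", "parse error");
}
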
@@ -65,7 +66,7 @@ impl IncomingBso {
} }
Err(e) => { Err(e) => {
// payload isn't valid json. // payload isn't valid json.
log::warn!("Invalid incoming cleartext {}: {}", self.envelope.id, e); warn!("Invalid incoming cleartext {}: {}", self.envelope.id, e);
IncomingContent { IncomingContent {
envelope: self.envelope, envelope: self.envelope,
kind: IncomingKind::Malformed, kind: IncomingKind::Malformed,
@@ -140,7 +141,7 @@ where
Some(serde_json::Value::String(content_id)) => { Some(serde_json::Value::String(content_id)) => {
// It exists in the payload! We treat a mismatch as malformed. // It exists in the payload! We treat a mismatch as malformed.
if content_id != id { if content_id != id {
log::trace!( trace!(
"malformed incoming record: envelope id: {} payload id: {}", "malformed incoming record: envelope id: {} payload id: {}",
content_id, content_id,
id id
@@ -152,7 +153,7 @@ where
return IncomingKind::Malformed; return IncomingKind::Malformed;
} }
if !id.is_valid_for_sync_server() { if !id.is_valid_for_sync_server() {
log::trace!("malformed incoming record: id is not valid: {}", id); trace!("malformed incoming record: id is not valid: {}", id);
report_error!( report_error!(
"incoming-invalid-bad-payload-id", "incoming-invalid-bad-payload-id",
"ID in the payload is invalid" "ID in the payload is invalid"
@@ -163,14 +164,14 @@ where
Some(v) => { Some(v) => {
// It exists in the payload but is not a string - they can't possibly be // It exists in the payload but is not a string - they can't possibly be
// the same as the envelope uses a String, so must be malformed. // the same as the envelope uses a String, so must be malformed.
log::trace!("malformed incoming record: id is not a string: {}", v); trace!("malformed incoming record: id is not a string: {}", v);
report_error!("incoming-invalid-wrong_type", "ID is not a string"); report_error!("incoming-invalid-wrong_type", "ID is not a string");
return IncomingKind::Malformed; return IncomingKind::Malformed;
} }
None => { None => {
// Doesn't exist in the payload - add it before trying to deser a T. // Doesn't exist in the payload - add it before trying to deser a T.
if !id.is_valid_for_sync_server() { if !id.is_valid_for_sync_server() {
log::trace!("malformed incoming record: id is not valid: {}", id); trace!("malformed incoming record: id is not valid: {}", id);
report_error!( report_error!(
"incoming-invalid-bad-envelope-id", "incoming-invalid-bad-envelope-id",
"ID in envelope is not valid" "ID in envelope is not valid"
@@ -254,7 +255,7 @@ mod tests {
} }
#[test] #[test]
fn test_content_deser() { fn test_content_deser() {
env_logger::try_init().ok(); error_support::init_for_tests();
let json = json!({ let json = json!({
"id": "test", "id": "test",
"payload": json!({"data": 1}).to_string(), "payload": json!({"data": 1}).to_string(),
@@ -271,7 +272,7 @@ mod tests {
#[test] #[test]
fn test_content_deser_empty_id() { fn test_content_deser_empty_id() {
env_logger::try_init().ok(); error_support::init_for_tests();
let json = json!({ let json = json!({
"id": "", "id": "",
"payload": json!({"data": 1}).to_string(), "payload": json!({"data": 1}).to_string(),
@@ -286,7 +287,7 @@ mod tests {
#[test] #[test]
fn test_content_deser_invalid() { fn test_content_deser_invalid() {
env_logger::try_init().ok(); error_support::init_for_tests();
// And a non-empty but still invalid guid. // And a non-empty but still invalid guid.
let json = json!({ let json = json!({
"id": "X".repeat(65), "id": "X".repeat(65),
@@ -299,7 +300,7 @@ mod tests {
#[test] #[test]
fn test_content_deser_not_string() { fn test_content_deser_not_string() {
env_logger::try_init().ok(); error_support::init_for_tests();
// A non-string id. // A non-string id.
let json = json!({ let json = json!({
"id": "0", "id": "0",
@@ -312,7 +313,7 @@ mod tests {
#[test] #[test]
fn test_content_ser_with_id() { fn test_content_ser_with_id() {
env_logger::try_init().ok(); error_support::init_for_tests();
// When serializing, expect the ID to be in the top-level payload (ie, // When serializing, expect the ID to be in the top-level payload (ie,
// in the envelope) but should not appear in the cleartext `payload` part of // in the envelope) but should not appear in the cleartext `payload` part of
// the payload. // the payload.
@@ -332,7 +333,7 @@ mod tests {
#[test] #[test]
fn test_content_ser_with_envelope() { fn test_content_ser_with_envelope() {
env_logger::try_init().ok(); error_support::init_for_tests();
// When serializing, expect the ID to be in the top-level payload (ie, // When serializing, expect the ID to be in the top-level payload (ie,
// in the envelope) but should not appear in the cleartext `payload` // in the envelope) but should not appear in the cleartext `payload`
let val = TestStruct { let val = TestStruct {
@@ -353,7 +354,7 @@ mod tests {
#[test] #[test]
#[should_panic] #[should_panic]
fn test_content_ser_no_ids() { fn test_content_ser_no_ids() {
env_logger::try_init().ok(); error_support::init_for_tests();
#[derive(Serialize)] #[derive(Serialize)]
struct StructWithNoId { struct StructWithNoId {
data: u32, data: u32,
@@ -365,14 +366,14 @@ mod tests {
#[test] #[test]
#[should_panic] #[should_panic]
fn test_content_ser_not_object() { fn test_content_ser_not_object() {
env_logger::try_init().ok(); error_support::init_for_tests();
let _ = OutgoingBso::from_content_with_id(json!("string")); let _ = OutgoingBso::from_content_with_id(json!("string"));
} }
#[test] #[test]
#[should_panic] #[should_panic]
fn test_content_ser_mismatched_ids() { fn test_content_ser_mismatched_ids() {
env_logger::try_init().ok(); error_support::init_for_tests();
let val = TestStruct { let val = TestStruct {
id: Guid::new("test"), id: Guid::new("test"),
data: 1, data: 1,
@@ -384,7 +385,7 @@ mod tests {
#[test] #[test]
#[should_panic] #[should_panic]
fn test_content_empty_id() { fn test_content_empty_id() {
env_logger::try_init().ok(); error_support::init_for_tests();
let val = TestStruct { let val = TestStruct {
id: Guid::new(""), id: Guid::new(""),
data: 1, data: 1,
@@ -395,7 +396,7 @@ mod tests {
#[test] #[test]
#[should_panic] #[should_panic]
fn test_content_invalid_id() { fn test_content_invalid_id() {
env_logger::try_init().ok(); error_support::init_for_tests();
let val = TestStruct { let val = TestStruct {
id: Guid::new(&"X".repeat(65)), id: Guid::new(&"X".repeat(65)),
data: 1, data: 1,


@@ -6,6 +6,7 @@ use super::request::InfoConfiguration;
 use super::{CollectionKeys, GlobalState};
 use crate::engine::{CollSyncIds, EngineSyncAssociation, SyncEngine};
 use crate::error;
+use crate::error::{info, trace, warn};
 use crate::KeyBundle;
 use crate::ServerTimestamp;
@@ -111,7 +112,7 @@ impl<'state> LocalCollStateMachine<'state> {
             LocalCollState::SyncIdChanged { ids } => {
                 let assoc = EngineSyncAssociation::Connected(ids);
-                log::info!("Resetting {} engine", engine.collection_name());
+                info!("Resetting {} engine", engine.collection_name());
                 engine.reset(&assoc)?;
                 Ok(LocalCollState::Unknown { assoc })
             }
@@ -132,14 +133,14 @@ impl<'state> LocalCollStateMachine<'state> {
         // 10 goes around.
         let mut count = 0;
         loop {
-            log::trace!("LocalCollState in {:?}", s);
+            trace!("LocalCollState in {:?}", s);
             match s {
                 LocalCollState::Ready { coll_state } => return Ok(Some(coll_state)),
                 LocalCollState::Declined | LocalCollState::NoSuchCollection => return Ok(None),
                 _ => {
                     count += 1;
                     if count > 10 {
-                        log::warn!("LocalCollStateMachine appears to be looping");
+                        warn!("LocalCollStateMachine appears to be looping");
                         return Ok(None);
                     }
                     // should we have better loop detection? Our limit of 10
View File
@@ -4,7 +4,7 @@
 use super::storage_client::Sync15ClientResponse;
 use crate::bso::OutgoingEncryptedBso;
-use crate::error::{self, Error as ErrorKind, Result};
+use crate::error::{self, debug, info, warn, Error as ErrorKind, Result};
 use crate::ServerTimestamp;
 use serde_derive::*;
 use std::collections::HashMap;
@@ -262,7 +262,7 @@ where
             || self.batch_limits.can_never_add(payload_length)
             || payload_length >= self.max_payload_bytes
         {
-            log::warn!(
+            warn!(
                 "Single record too large to submit to server ({} b)",
                 payload_length
             );
@@ -299,7 +299,7 @@ where
         if item_len >= self.max_request_bytes {
             self.queued.truncate(item_start);
-            log::warn!(
+            warn!(
                 "Single record too large to submit to server ({} b)",
                 item_len
             );
@@ -311,11 +309,9 @@ where
         let can_send_record = self.queued.len() < self.max_request_bytes;
         if !can_post_record || !can_send_record || !can_batch_record {
-            log::debug!(
+            debug!(
                 "PostQueue flushing! (can_post = {}, can_send = {}, can_batch = {})",
-                can_post_record,
-                can_send_record,
-                can_batch_record
+                can_post_record, can_send_record, can_batch_record
             );
             // "unwrite" the record.
             self.queued.truncate(item_start);
@@ -353,7 +351,7 @@ where
             BatchState::InBatch(ref s) => Some(s.clone()),
         };
-        log::info!(
+        info!(
             "Posting {} records of {} bytes",
             self.post_limits.cur_records,
             self.queued.len()
@@ -397,7 +395,7 @@ where
         }
         if want_commit {
-            log::debug!("Committed batch {:?}", self.batch);
+            debug!("Committed batch {:?}", self.batch);
             self.batch = BatchState::NoBatch;
             self.on_response.handle_response(resp, false)?;
             return Ok(());
@@ -426,7 +424,7 @@ where
         match &self.batch {
             BatchState::Unsupported => {
-                log::warn!("Server changed its mind about supporting batching mid-batch...");
+                warn!("Server changed its mind about supporting batching mid-batch...");
             }
             BatchState::InBatch(ref cur_id) => {
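
For orientation, the `debug!` line rewritten in the `@@ -311,11 +309,9 @@` hunk guards the queue's flush decision: the queue flushes whenever posting the next record would break the post, request-size, or batch limits. A toy version of just that predicate (a sketch, not the real PostQueue API):

    // Mirrors the !can_post || !can_send || !can_batch check above.
    fn should_flush(can_post: bool, can_send: bool, can_batch: bool) -> bool {
        !can_post || !can_send || !can_batch
    }

    fn main() {
        // A record within every limit does not force a flush...
        assert!(!should_flush(true, true, true));
        // ...but exceeding any single limit does.
        assert!(should_flush(true, false, true));
        println!("flush predicate ok");
    }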
View File
@@ -8,7 +8,7 @@ use super::request::{InfoCollections, InfoConfiguration};
 use super::storage_client::{SetupStorageClient, Sync15ClientResponse};
 use super::CollectionKeys;
 use crate::bso::OutgoingEncryptedBso;
-use crate::error::{self, Error as ErrorKind, ErrorResponse};
+use crate::error::{self, debug, info, trace, warn, Error as ErrorKind, ErrorResponse};
 use crate::record_types::{MetaGlobalEngine, MetaGlobalRecord};
 use crate::EncryptedPayload;
 use crate::{Guid, KeyBundle, ServerTimestamp};
@@ -96,7 +96,7 @@ struct EngineStateOutput {
 fn compute_engine_states(input: EngineStateInput) -> EngineStateOutput {
     use super::util::*;
-    log::debug!("compute_engine_states: input {:?}", input);
+    debug!("compute_engine_states: input {:?}", input);
     let (must_enable, must_disable) = partition_by_value(&input.user_changes);
     let have_remote = input.remote.is_some();
     let RemoteEngineState {
@@ -107,7 +107,7 @@ fn compute_engine_states(input: EngineStateInput) -> EngineStateOutput {
     let both_declined_and_remote = set_intersection(&info_collections, &remote_declined);
     if !both_declined_and_remote.is_empty() {
         // Should we wipe these too?
-        log::warn!(
+        warn!(
             "Remote state contains engines which are in both info/collections and meta/global's declined: {:?}",
             both_declined_and_remote,
         );
@@ -137,7 +137,7 @@ fn compute_engine_states(input: EngineStateInput) -> EngineStateOutput {
         declined: result_declined,
     };
     // No PII here and this helps debug problems.
-    log::debug!("compute_engine_states: output {:?}", output);
+    debug!("compute_engine_states: output {:?}", output);
     output
 }
@@ -208,7 +208,7 @@ fn fixup_meta_global(global: &mut MetaGlobalRecord) -> bool {
         let should_have_engine = !global.declined.iter().any(|c| c == name);
         if had_engine != should_have_engine {
             if should_have_engine {
-                log::debug!("SyncID for engine {:?} was missing", name);
+                debug!("SyncID for engine {:?} was missing", name);
                 global.engines.insert(
                     name.to_string(),
                     MetaGlobalEngine {
@@ -217,7 +217,7 @@ fn fixup_meta_global(global: &mut MetaGlobalRecord) -> bool {
                 },
             );
         } else {
-            log::debug!("SyncID for engine {:?} was present, but shouldn't be", name);
+            debug!("SyncID for engine {:?} was present, but shouldn't be", name);
             global.engines.remove(name);
         }
         changed_any = true;
@@ -353,7 +353,7 @@ impl<'a> SetupStateMachine<'a> {
                 if global.storage_version < STORAGE_VERSION {
                     Ok(FreshStartRequired { config })
                 } else {
-                    log::info!("Have info/collections and meta/global. Computing new engine states");
+                    info!("Have info/collections and meta/global. Computing new engine states");
                     let initial_global_declined: HashSet<String> =
                         global.declined.iter().cloned().collect();
                     let result = compute_engine_states(EngineStateInput {
@@ -370,7 +370,7 @@ impl<'a> SetupStateMachine<'a> {
                     // If the declined engines differ from remote, fix that.
                     let fixed_declined = if result.declined != initial_global_declined {
                         global.declined = result.declined.iter().cloned().collect();
-                        log::info!(
+                        info!(
                             "Uploading new declined {:?} to meta/global with timestamp {:?}",
                             global.declined,
                             global_timestamp,
@@ -381,7 +381,7 @@ impl<'a> SetupStateMachine<'a> {
                     };
                     // If there are missing syncIds, we need to fix those as well
                     let fixed_ids = if fixup_meta_global(&mut global) {
-                        log::info!(
+                        info!(
                             "Uploading corrected meta/global with timestamp {:?}",
                             global_timestamp,
                         );
@@ -393,14 +393,14 @@ impl<'a> SetupStateMachine<'a> {
                     if fixed_declined || fixed_ids {
                         global_timestamp =
                             self.client.put_meta_global(global_timestamp, &global)?;
-                        log::debug!("new global_timestamp: {:?}", global_timestamp);
+                        debug!("new global_timestamp: {:?}", global_timestamp);
                     }
                     // Update the set of changes needed.
                     if self.changes_needed.is_some() {
                         // Should never happen (we prevent state machine
                         // loops elsewhere) but if it did, the info is stale
                         // anyway.
-                        log::warn!("Already have a set of changes needed, Overwriting...");
+                        warn!("Already have a set of changes needed, Overwriting...");
                     }
                     self.changes_needed = Some(result.changes_needed);
                     Ok(InitialWithMetaGlobal {
@@ -487,11 +487,11 @@ impl<'a> SetupStateMachine<'a> {
             FreshStartRequired { config } => {
                 // Wipe the server.
-                log::info!("Fresh start: wiping remote");
+                info!("Fresh start: wiping remote");
                 self.client.wipe_all_remote()?;

                 // Upload a fresh `meta/global`...
-                log::info!("Uploading meta/global");
+                info!("Uploading meta/global");
                 let computed = compute_engine_states(EngineStateInput {
                     local_declined: self.pgs.get_declined().iter().cloned().collect(),
                     user_changes: self.engine_updates.cloned().unwrap_or_default(),
@@ -531,7 +531,7 @@ impl<'a> SetupStateMachine<'a> {
         loop {
             self.interruptee.err_if_interrupted()?;
             let label = &s.label();
-            log::trace!("global state: {:?}", label);
+            trace!("global state: {:?}", label);
             match s {
                 Ready { state } => {
                     self.sequence.push(label);
@@ -757,7 +757,7 @@ mod tests {
     #[test]
     fn test_state_machine_ready_from_empty() {
         nss::ensure_initialized();
-        let _ = env_logger::try_init();
+        error_support::init_for_tests();
         let root_key = KeyBundle::new_random().unwrap();
         let keys = CollectionKeys {
             timestamp: ServerTimestamp(123_400),
@@ -815,7 +815,7 @@ mod tests {
     #[test]
     fn test_from_previous_state_declined() {
         nss::ensure_initialized();
-        let _ = env_logger::try_init();
+        error_support::init_for_tests();
         // The state-machine sequence where we didn't use the previous state
         // (ie, where the state machine restarted)
         let sm_seq_restarted = vec![
View File
@@ -8,7 +8,7 @@ use super::request::{
 use super::token;
 use crate::bso::{IncomingBso, IncomingEncryptedBso, OutgoingBso, OutgoingEncryptedBso};
 use crate::engine::{CollectionPost, CollectionRequest};
-use crate::error::{self, Error, ErrorResponse};
+use crate::error::{self, debug, info, trace, warn, Error, ErrorResponse};
 use crate::record_types::MetaGlobalRecord;
 use crate::{CollectionName, Guid, ServerTimestamp};
 use serde_json::Value;
@@ -37,7 +37,7 @@ fn parse_seconds(seconds_str: &str) -> Option<u32> {
     let secs = seconds_str.parse::<f64>().ok()?.ceil();
     // Note: u32 doesn't impl TryFrom<f64> :(
     if !secs.is_finite() || secs < 0.0 || secs > f64::from(u32::MAX) {
-        log::warn!("invalid backoff value: {}", secs);
+        warn!("invalid backoff value: {}", secs);
         None
     } else {
         Some(secs as u32)
@@ -77,10 +77,9 @@ impl<T> Sync15ClientResponse<T> {
                 .get(header_names::X_LAST_MODIFIED)
                 .and_then(|s| ServerTimestamp::from_str(s).ok())
                 .ok_or(Error::MissingServerTimestamp)?;
-            log::info!(
+            info!(
                 "Successful request to \"{}\", incoming x-last-modified={:?}",
-                route,
-                last_modified
+                route, last_modified
             );
             Sync15ClientResponse::Success {
@@ -91,7 +90,7 @@ impl<T> Sync15ClientResponse<T> {
             }
         } else {
             let status = resp.status;
-            log::info!("Request \"{}\" was an error (status={})", route, status);
+            info!("Request \"{}\" was an error (status={})", route, status);
             match status {
                 404 => Sync15ClientResponse::Error(ErrorResponse::NotFound { route }),
                 401 => Sync15ClientResponse::Error(ErrorResponse::Unauthorized { route }),
@@ -110,7 +109,7 @@ impl<T> Sync15ClientResponse<T> {
                 // This should never happen as callers are expected to have
                 // already special-cased this response, so warn if it does.
                 // (or maybe we could panic?)
-                log::warn!("Converting success response into an error");
+                warn!("Converting success response into an error");
                 ErrorResponse::RequestFailed { status, route }
             }
             Sync15ClientResponse::Error(e) => e,
@@ -225,10 +224,9 @@ impl SetupStorageClient for Sync15StorageClient {
                 route,
                 status,
             } => {
-                log::debug!(
+                debug!(
                     "Got meta global with modified = {}; last-modified = {}",
-                    record.envelope.modified,
-                    last_modified
+                    record.envelope.modified, last_modified
                 );
                 Sync15ClientResponse::Success {
                     record: serde_json::from_str(&record.payload)?,
@@ -332,7 +330,7 @@ impl Sync15StorageClient {
     where
         for<'a> T: serde::de::Deserialize<'a>,
     {
-        log::trace!(
+        trace!(
             "request: {} {} ({:?})",
             req.method,
             req.url.path(),
@@ -410,7 +408,7 @@ impl Sync15StorageClient {
     pub(crate) fn wipe_remote_engine(&self, engine: &str) -> error::Result<()> {
         let s = self.tsc.api_endpoint()? + "/";
         let url = Url::parse(&s)?.join(&format!("storage/{}", engine))?;
-        log::debug!("Wiping: {:?}", url);
+        debug!("Wiping: {:?}", url);
        let req = self.build_request(Method::Delete, url)?;
        match self.exec_request::<Value>(req, false) {
            Ok(Sync15ClientResponse::Error(ErrorResponse::NotFound { .. }))
View File
@@ -5,7 +5,7 @@
 use super::{CollectionUpdate, GlobalState, LocalCollStateMachine, Sync15StorageClient};
 use crate::clients_engine;
 use crate::engine::SyncEngine;
-use crate::error::Error;
+use crate::error::{info, warn, Error};
 use crate::telemetry;
 use crate::KeyBundle;
 use interrupt_support::Interruptee;
@@ -22,14 +22,14 @@ pub fn synchronize_with_clients_engine(
     interruptee: &dyn Interruptee,
 ) -> Result<(), Error> {
     let collection = engine.collection_name();
-    log::info!("Syncing collection {}", collection);
+    info!("Syncing collection {}", collection);

     // our global state machine is ready - get the collection machine going.
     let coll_state = match LocalCollStateMachine::get_state(engine, global_state, root_sync_key)? {
         Some(coll_state) => coll_state,
         None => {
             // XXX - this is either "error" or "declined".
-            log::warn!(
+            warn!(
                 "can't setup for the {} collection - hopefully it works later",
                 collection
             );
@@ -44,7 +44,7 @@ pub fn synchronize_with_clients_engine(
     // We assume an "engine" manages exactly one "collection" with the engine's name.
     match engine.get_collection_request(coll_state.last_modified)? {
         None => {
-            log::info!("skipping incoming for {} - not needed.", collection);
+            info!("skipping incoming for {} - not needed.", collection);
         }
         Some(collection_request) => {
             // Ideally we would "batch" incoming records (eg, fetch just 1000 at a time)
@@ -64,7 +64,7 @@ pub fn synchronize_with_clients_engine(
             // For this reason, an engine can't really trust a server timestamp until the
             // very end when we know we've staged them all.
             let incoming = super::fetch_incoming(client, &coll_state, collection_request)?;
-            log::info!("Downloaded {} remote changes", incoming.len());
+            info!("Downloaded {} remote changes", incoming.len());
             engine.stage_incoming(incoming, telem_engine)?;
             interruptee.err_if_interrupted()?;
         }
@@ -74,7 +74,7 @@ pub fn synchronize_with_clients_engine(
     // It *might* even make sense to only call `apply()` when something was staged,
     // but that's not clear - see the discussion at
     // https://github.com/mozilla/application-services/pull/5441/files/f36274f455a6299f10e7ce56b167882c369aa806#r1189267540
-    log::info!("Applying changes");
+    info!("Applying changes");
     let outgoing = engine.apply(coll_state.last_modified, telem_engine)?;
     interruptee.err_if_interrupted()?;
@@ -83,7 +83,7 @@ pub fn synchronize_with_clients_engine(
     // engine about the successful server batch commit.
     // Most stuff below should be called per-batch rather than at the successful end of all
     // batches, but that's not trivial.
-    log::info!("Uploading {} outgoing changes", outgoing.len());
+    info!("Uploading {} outgoing changes", outgoing.len());
     let upload_info = CollectionUpdate::new_from_changeset(
         client,
         &coll_state,
@@ -92,7 +92,7 @@ pub fn synchronize_with_clients_engine(
         fully_atomic,
     )?
     .upload()?;
-    log::info!(
+    info!(
         "Upload success ({} records success, {} records failed)",
         upload_info.successful_ids.len(),
         upload_info.failed_ids.len()
@@ -109,6 +109,6 @@ pub fn synchronize_with_clients_engine(
     engine.sync_finished()?;

-    log::info!("Sync finished!");
+    info!("Sync finished!");
     Ok(())
 }
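
The renamed logging above traces the per-collection order: fetch incoming, stage, apply, upload, finish. A self-contained toy of the staging idea (illustrative names, not the real sync15 API):

    // Staging keeps downloads separate from application, so an
    // interruption mid-download can't leave half-applied state.
    struct ToyEngine {
        staged: Vec<String>,
    }

    impl ToyEngine {
        fn stage_incoming(&mut self, incoming: Vec<String>) {
            self.staged.extend(incoming);
        }

        // Reconcile staged records and return what needs uploading.
        fn apply(&mut self) -> Vec<String> {
            self.staged.drain(..).map(|r| format!("reconciled {r}")).collect()
        }
    }

    fn main() {
        let mut engine = ToyEngine { staged: Vec::new() };
        engine.stage_incoming(vec!["tab-1".into(), "tab-2".into()]);
        let outgoing = engine.apply();
        println!("uploading {} records", outgoing.len());
    }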
View File
@@ -10,7 +10,7 @@ use super::status::{ServiceStatus, SyncResult};
 use super::storage_client::{BackoffListener, Sync15StorageClient, Sync15StorageClientInit};
 use crate::clients_engine::{self, CommandProcessor, CLIENTS_TTL_REFRESH};
 use crate::engine::{EngineSyncAssociation, SyncEngine};
-use crate::error::Error;
+use crate::error::{debug, info, trace, warn, Error};
 use crate::telemetry;
 use crate::KeyBundle;
 use interrupt_support::Interruptee;
@@ -125,7 +125,7 @@ pub fn sync_multiple_with_command_processor(
     interruptee: &dyn Interruptee,
     req_info: Option<SyncRequestInfo<'_>>,
 ) -> SyncResult {
-    log::info!("Syncing {} engines", engines.len());
+    info!("Syncing {} engines", engines.len());
     let mut sync_result = SyncResult {
         service_status: ServiceStatus::OtherError,
         result: Ok(()),
@@ -152,16 +152,15 @@ pub fn sync_multiple_with_command_processor(
     };
     match driver.sync() {
         Ok(()) => {
-            log::debug!(
+            debug!(
                 "sync was successful, final status={:?}",
                 sync_result.service_status
             );
         }
         Err(e) => {
-            log::warn!(
+            warn!(
                 "sync failed: {}, final status={:?}",
-                e,
-                sync_result.service_status,
+                e, sync_result.service_status,
             );
             sync_result.result = Err(e);
         }
@@ -170,7 +169,7 @@ pub fn sync_multiple_with_command_processor(
     // ignoring it during the sync
     sync_result.set_sync_after(backoff.get_required_wait(false).unwrap_or_default());
     mem_cached_state.next_sync_after = sync_result.next_sync_after;
-    log::trace!("Sync result: {:?}", sync_result);
+    trace!("Sync result: {:?}", sync_result);
     sync_result
 }
@@ -202,17 +201,17 @@ struct SyncMultipleDriver<'info, 'res, 'pgs, 'mcs> {
 impl SyncMultipleDriver<'_, '_, '_, '_> {
     /// The actual worker for sync_multiple.
     fn sync(mut self) -> result::Result<(), Error> {
-        log::info!("Loading/initializing persisted state");
+        info!("Loading/initializing persisted state");
         let mut pgs = self.prepare_persisted_state();

-        log::info!("Preparing client info");
+        info!("Preparing client info");
         let client_info = self.prepare_client_info()?;

         if self.was_interrupted() {
             return Ok(());
         }

-        log::info!("Entering sync state machine");
+        info!("Entering sync state machine");
         // Advance the state machine to the point where it can perform a full
         // sync. This may involve uploading meta/global, crypto/keys etc.
         let mut global_state = self.run_state_machine(&client_info, &mut pgs)?;
@@ -226,7 +225,7 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
         self.result.service_status = ServiceStatus::Ok;

         let clients_engine = if let Some(command_processor) = self.command_processor {
-            log::info!("Synchronizing clients engine");
+            info!("Synchronizing clients engine");
             let should_refresh = self.mem_cached_state.should_refresh_client();
             let mut engine = clients_engine::Engine::new(command_processor, self.interruptee);
             if let Err(e) = engine.sync(
@@ -258,16 +257,16 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
             None
         };

-        log::info!("Synchronizing engines");
+        info!("Synchronizing engines");
         let telem_sync =
             self.sync_engines(&client_info, &mut global_state, clients_engine.as_ref());
         self.result.telemetry.sync(telem_sync);

-        log::info!("Finished syncing engines.");
+        info!("Finished syncing engines.");

         if !self.saw_auth_error {
-            log::trace!("Updating persisted global state");
+            trace!("Updating persisted global state");
             self.mem_cached_state.last_client_info = Some(client_info);
             self.mem_cached_state.last_global_state = Some(global_state);
         }
@@ -277,7 +276,7 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
     fn was_interrupted(&mut self) -> bool {
         if self.interruptee.was_interrupted() {
-            log::info!("Interrupted, bailing out");
+            info!("Interrupted, bailing out");
             self.result.service_status = ServiceStatus::Interrupted;
             true
         } else {
@@ -299,14 +298,14 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
                 .get_required_wait(self.ignore_soft_backoff)
                 .is_some()
             {
-                log::warn!("Got backoff, bailing out of sync early");
+                warn!("Got backoff, bailing out of sync early");
                 break;
             }
             if global_state.global.declined.iter().any(|e| e == &*name) {
-                log::info!("The {} engine is declined. Skipping", name);
+                info!("The {} engine is declined. Skipping", name);
                 continue;
             }
-            log::info!("Syncing {} engine!", name);
+            info!("Syncing {} engine!", name);

             let mut telem_engine = telemetry::Engine::new(&*name);
             let result = super::sync::synchronize_with_clients_engine(
@@ -321,9 +320,9 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
             );

             match result {
-                Ok(()) => log::info!("Sync of {} was successful!", name),
+                Ok(()) => info!("Sync of {} was successful!", name),
                 Err(ref e) => {
-                    log::warn!("Sync of {} failed! {:?}", name, e);
+                    warn!("Sync of {} failed! {:?}", name, e);
                     let this_status = ServiceStatus::from_err(e);
                     // The only error which forces us to discard our state is an
                     // auth error.
@@ -364,7 +363,7 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
             self.interruptee,
         );

-        log::info!("Advancing state machine to ready (full)");
+        info!("Advancing state machine to ready (full)");
         let res = state_machine.run_to_ready(last_state);
         // Grab this now even though we don't need it until later to avoid a
         // lifetime issue
@@ -376,7 +375,7 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
         // Now that we've gone through the state machine, engine the declined list in
         // the sync_result
         self.result.declined = Some(pgs.get_declined().to_vec());
-        log::debug!(
+        debug!(
             "Declined engines list after state machine set to: {:?}",
             self.result.declined,
         );
@@ -406,14 +405,14 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
             return Ok(());
         }
         for e in &changes.remote_wipes {
-            log::info!("Engine {:?} just got disabled locally, wiping server", e);
+            info!("Engine {:?} just got disabled locally, wiping server", e);
             client.wipe_remote_engine(e)?;
         }

         for s in self.engines {
             let name = s.collection_name();
             if changes.local_resets.contains(&*name) {
-                log::info!("Resetting engine {}, as it was declined remotely", name);
+                info!("Resetting engine {}, as it was declined remotely", name);
                 s.reset(&EngineSyncAssociation::Disconnected)?;
             }
         }
@@ -429,17 +428,17 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
                     // reuse the client or the memory cached state. We do keep the disk
                     // state as currently that's only the declined list.
                     if client_info.client_init != *self.storage_init {
-                        log::info!("Discarding all state as the account might have changed");
+                        info!("Discarding all state as the account might have changed");
                         *self.mem_cached_state = MemoryCachedState::default();
                         ClientInfo::new(self.storage_init)?
                     } else {
-                        log::debug!("Reusing memory-cached client_info");
+                        debug!("Reusing memory-cached client_info");
                         // we can reuse it (which should be the common path)
                         client_info
                     }
                 }
                 None => {
-                    log::debug!("mem_cached_state was stale or missing, need setup");
+                    debug!("mem_cached_state was stale or missing, need setup");
                     // We almost certainly have no other state here, but to be safe, we
                     // throw away any memory state we do have.
                     self.mem_cached_state.clear_sensitive_info();
@@ -460,7 +459,7 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
             Some(persisted_string) if !persisted_string.is_empty() => {
                 match serde_json::from_str::<PersistedGlobalState>(persisted_string) {
                     Ok(state) => {
-                        log::trace!("Read persisted state: {:?}", state);
+                        trace!("Read persisted state: {:?}", state);
                         // Note that we don't set `result.declined` from the
                         // data in state - it remains None, which explicitly
                         // indicates "we don't have updated info".
@@ -479,7 +478,7 @@ impl SyncMultipleDriver<'_, '_, '_, '_> {
                 }
             }
             _ => {
-                log::info!(
+                info!(
                     "The application didn't give us persisted state - \
                      this is only expected on the very first run for a given user."
                 );
View File
@@ -2,7 +2,7 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

-use crate::error::{self, Error as ErrorKind, Result};
+use crate::error::{self, debug, trace, warn, Error as ErrorKind, Result};
 use crate::ServerTimestamp;
 use rc_crypto::hawk;
 use serde_derive::*;
@@ -99,7 +99,7 @@ impl TokenServerFetcher {
 impl TokenFetcher for TokenServerFetcher {
     fn fetch_token(&self) -> Result<TokenFetchResult> {
-        log::debug!("Fetching token from {}", self.server_url);
+        debug!("Fetching token from {}", self.server_url);
         let resp = Request::get(self.server_url.clone())
             .header(
                 header_names::AUTHORIZATION,
@@ -109,9 +109,9 @@ impl TokenFetcher for TokenServerFetcher {
             .send()?;

         if !resp.is_success() {
-            log::warn!("Non-success status when fetching token: {}", resp.status);
+            warn!("Non-success status when fetching token: {}", resp.status);
             // TODO: the body should be JSON and contain a status parameter we might need?
-            log::trace!(" Response body {}", resp.text());
+            trace!(" Response body {}", resp.text());
             // XXX - shouldn't we "chain" these errors - ie, a BackoffError could
             // have a TokenserverHttpError as its cause?
             if let Some(res) = resp.headers.get_as::<f64, _>(header_names::RETRY_AFTER) {
@@ -288,10 +288,9 @@ impl<TF: TokenFetcher> TokenProviderImpl<TF> {
                 if prev == tc.token.api_endpoint {
                     TokenState::Token(tc)
                 } else {
-                    log::warn!(
+                    warn!(
                         "api_endpoint changed from {} to {}",
-                        prev,
-                        tc.token.api_endpoint
+                        prev, tc.token.api_endpoint
                     );
                     TokenState::NodeReassigned
                 }
@@ -331,7 +330,7 @@ impl<TF: TokenFetcher> TokenProviderImpl<TF> {
             }
             TokenState::Backoff(ref until, ref existing_endpoint) => {
                 if let Ok(remaining) = until.duration_since(self.fetcher.now()) {
-                    log::debug!("enforcing existing backoff - {:?} remains", remaining);
+                    debug!("enforcing existing backoff - {:?} remains", remaining);
                     None
                 } else {
                     // backoff period is over
View File
@@ -11,7 +11,8 @@ use crate::client::{
 };
 use crate::client_types::{ClientData, RemoteClient};
 use crate::engine::CollectionRequest;
-use crate::{error::Result, Guid, KeyBundle};
+use crate::error::{debug, info, warn, Result};
+use crate::{Guid, KeyBundle};
 use interrupt_support::Interruptee;

 use super::{
@@ -67,18 +68,18 @@ impl<'a> Driver<'a> {
             let client: ClientRecord = match content.kind {
                 IncomingKind::Malformed => {
-                    log::debug!("Error unpacking record");
+                    debug!("Error unpacking record");
                     continue;
                 }
                 IncomingKind::Tombstone => {
-                    log::debug!("Record has been deleted; skipping...");
+                    debug!("Record has been deleted; skipping...");
                     continue;
                 }
                 IncomingKind::Content(client) => client,
             };

             if client.id == self.command_processor.settings().fxa_device_id {
-                log::debug!("Found my record on the server");
+                debug!("Found my record on the server");
                 // If we see our own client record, apply any incoming commands,
                 // remove them from the list, and reupload the record. Any
                 // commands that we don't understand also go back in the list.
@@ -94,10 +95,10 @@ impl<'a> Driver<'a> {
                     match status {
                         CommandStatus::Applied => {}
                         CommandStatus::Ignored => {
-                            log::debug!("Ignored command {:?}", c);
+                            debug!("Ignored command {:?}", c);
                         }
                         CommandStatus::Unsupported => {
-                            log::warn!("Don't know how to apply command {:?}", c);
+                            warn!("Don't know how to apply command {:?}", c);
                             current_client_record.commands.push(c.clone());
                         }
                     }
@@ -120,7 +121,7 @@ impl<'a> Driver<'a> {
                 // We periodically upload our own client record, even if it
                 // doesn't change, to keep it fresh.
                 if should_refresh_client || client != current_client_record {
-                    log::debug!("Will update our client record on the server");
+                    debug!("Will update our client record on the server");
                     let envelope = OutgoingEnvelope {
                         id: content.envelope.id,
                         ttl: Some(CLIENTS_TTL),
@@ -276,7 +277,7 @@ impl Engine<'_> {
         root_sync_key: &KeyBundle,
         should_refresh_client: bool,
     ) -> Result<()> {
-        log::info!("Syncing collection clients");
+        info!("Syncing collection clients");

         let coll_keys = CollectionKeys::from_encrypted_payload(
             global_state.keys.clone(),
@@ -314,13 +315,13 @@ impl Engine<'_> {
         )?
         .upload()?;

-        log::info!(
+        info!(
             "Upload success ({} records success, {} records failed)",
             upload_info.successful_ids.len(),
             upload_info.failed_ids.len()
         );

-        log::info!("Finished syncing clients");
+        info!("Finished syncing clients");
         Ok(())
     }
View File
@@ -2,6 +2,7 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

+use crate::error::error;
 use serde_derive::*;

 use super::Command;
@@ -98,12 +99,12 @@ impl CommandRecord {
             match &self.args[0] {
                 Some(name) => Some(name.into()),
                 None => {
-                    log::error!("Incoming '{cmd_name}' command has null argument");
+                    error!("Incoming '{cmd_name}' command has null argument");
                     None
                 }
             }
         } else {
-            log::error!(
+            error!(
                 "Incoming '{cmd_name}' command has wrong number of arguments ({})",
                 self.args.len()
             );
@@ -121,7 +122,7 @@ impl CommandRecord {
         if self.args.is_empty() {
             Some(Command::ResetAll)
         } else {
-            log::error!("Invalid arguments for 'resetAll' command");
+            error!("Invalid arguments for 'resetAll' command");
             None
         }
     }
View File
@@ -2,6 +2,7 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

+use crate::error::debug;
 use crate::{telemetry, ServerTimestamp};
 use anyhow::Result;
@@ -144,7 +145,7 @@ impl<A: BridgedEngineAdaptor> BridgedEngine for A {
         let engine = self.engine();
         let assoc = engine.get_sync_assoc()?;
         if matches!(assoc, EngineSyncAssociation::Connected(c) if c.coll == sync_id) {
-            log::debug!("ensure_current_sync_id is current");
+            debug!("ensure_current_sync_id is current");
         } else {
             let new_coll_ids = CollSyncIds {
                 global: Guid::empty(),
View File
@@ -4,6 +4,10 @@
 use interrupt_support::Interrupted;

+// reexport logging helpers.
+#[allow(unused_imports)] // some only used with certain features.
+pub use error_support::{debug, error, info, trace, warn};
+
 /// This enum is to discriminate `StorageHttpError`, and not used as an error.
 #[cfg(feature = "sync-client")]
 #[derive(Debug, Clone)]
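
This small hunk is what the mechanical `log::info!` → `info!` rewrites above rely on: the crate's `error` module re-exports error-support's logging macros, so call sites import them from one place. A single-file sketch of the same pattern (assumes only that the error-support crate is on the dependency list):

    mod error {
        // Re-export the logging helpers, as in the hunk above.
        pub use error_support::{debug, error, info, trace, warn};
    }

    use crate::error::{info, warn};

    fn main() {
        // Call sites use the unqualified macros instead of log::info!.
        info!("macros resolve through the crate-local error module");
        warn!("call sites no longer reference the log crate directly");
    }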
View File
@@ -2,7 +2,7 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

-use crate::error::{Error, Result};
+use crate::error::{warn, Error, Result};
 use base64::{
     engine::general_purpose::{STANDARD, URL_SAFE_NO_PAD},
     Engine,
@@ -108,7 +108,7 @@ impl KeyBundle {
         // robust and avoids an allocation.
         let mut decoded_hmac = vec![0u8; 32];
         if base16::decode_slice(hmac_base16, &mut decoded_hmac).is_err() {
-            log::warn!("Garbage HMAC verification string: contained non base16 characters");
+            warn!("Garbage HMAC verification string: contained non base16 characters");
             return Err(Error::HmacMismatch);
         }
         let iv = STANDARD.decode(iv_base64)?;
View File
@@ -9,6 +9,7 @@ use crate::error::Error;
 #[cfg(feature = "sync-client")]
 use crate::error::ErrorResponse;
+use crate::error::warn;

 use std::collections::HashMap;
 use std::time;
@@ -458,10 +459,9 @@ impl Engine {
         if self.failure.is_none() {
             self.failure = Some(failure);
         } else {
-            log::warn!(
+            warn!(
                 "engine already has recorded a failure of {:?} - ignoring {:?}",
-                &self.failure,
-                &failure
+                &self.failure, &failure
             );
         }
     }
@@ -766,7 +766,7 @@ impl SyncTelemetryPing {
     pub fn uid(&mut self, uid: String) {
         if let Some(ref existing) = self.uid {
             if *existing != uid {
-                log::warn!("existing uid ${} being replaced by {}", existing, uid);
+                warn!("existing uid ${} being replaced by {}", existing, uid);
             }
         }
         self.uid = Some(uid);
View File
@@ -1 +1 @@
-{"files":{"Cargo.toml":"ffc7a4b8f8d8df651cdf9de08c1da761554c5bd615c215ba6ebbdd8920b6b04c","README.md":"c48b8f391ef822c4f3971b5f453a1e7b43bea232752d520460d2f04803aead1a","build.rs":"33e61b811b19ed2b58e319cc65d5988bed258d2c4fea2d706301184c59847a0f","src/error.rs":"6e5fd48a3f228d37977881a3657f8635b1b37e3b16d91ac2d8476174172a2a74","src/lib.rs":"2e2a7173ec3bf46065a60c34fbdf91254ff720d9d15955cb87b9019dafca7690","src/schema.rs":"510218d465c7d26d6b9f342cc33c14ab83044a67561ef924c33dadb060761972","src/storage.rs":"e3ee12bbaecb754eced07b2f4bcd034b84161c5dcd6dc5cbe62ebc47a92f44d2","src/store.rs":"30d854aa7ad1ee3a3cac683a1ae0b9fb3833c8d90537beafcd3e4b24f6e7c6e8","src/sync/bridge.rs":"18d3a7913a030b598d4b6cbd5b7e2ab4cef4cc7ea964f5bc84d7fb2f28787529","src/sync/engine.rs":"73007423f2a22314a034ac660aa65bd9c50e8aa850c445a66604486280067843","src/sync/mod.rs":"09ba3c87f1174a243bf5aaa481effd18929d54359ceb9b23ccb2c32ee3482f34","src/sync/record.rs":"eef6751c209d039958afbe245ddb006cfdf6b8b6b47f925f69c552b832b87922","src/tabs.udl":"abb9f81705ee7ac277a1696b224404388a793301f973718322e9aa07538f1bc4","uniffi.toml":"70a41bac1bbbde7a571f1b023f22636337ca3bffd6891dd67596fe13ab98b2f6"},"package":null}
+{"files":{"Cargo.toml":"522f36d0417f7e62fbbff6876eb85eb215d5ee0fb4276785654b72c26b667e71","README.md":"c48b8f391ef822c4f3971b5f453a1e7b43bea232752d520460d2f04803aead1a","build.rs":"33e61b811b19ed2b58e319cc65d5988bed258d2c4fea2d706301184c59847a0f","src/error.rs":"6e5fd48a3f228d37977881a3657f8635b1b37e3b16d91ac2d8476174172a2a74","src/lib.rs":"2e2a7173ec3bf46065a60c34fbdf91254ff720d9d15955cb87b9019dafca7690","src/schema.rs":"698118cabf04cea702dfd2201457afd4238116245438fb50738b5e0a393e3f6c","src/storage.rs":"9edc80923123572482b67ad10e3caab2696840e9e646e1502936f87026a3660e","src/store.rs":"30d854aa7ad1ee3a3cac683a1ae0b9fb3833c8d90537beafcd3e4b24f6e7c6e8","src/sync/bridge.rs":"815d36559e4a061652678a9e798b73de12be05b22495a65e2726a0a4bdca6922","src/sync/engine.rs":"5caa2a9120dec144e8403a9641ad6d357897b1f2c32775271e163f02887281de","src/sync/mod.rs":"09ba3c87f1174a243bf5aaa481effd18929d54359ceb9b23ccb2c32ee3482f34","src/sync/record.rs":"eef6751c209d039958afbe245ddb006cfdf6b8b6b47f925f69c552b832b87922","src/tabs.udl":"abb9f81705ee7ac277a1696b224404388a793301f973718322e9aa07538f1bc4","uniffi.toml":"70a41bac1bbbde7a571f1b023f22636337ca3bffd6891dd67596fe13ab98b2f6"},"package":null}
View File
@@ -34,7 +34,6 @@ path = "src/lib.rs"
 [dependencies]
 anyhow = "1.0"
 lazy_static = "1.4"
-log = "0.4"
 serde = "1"
 serde_derive = "1"
 serde_json = "1"
@@ -77,10 +76,9 @@ version = "0.29.0"
 [dev-dependencies]
 tempfile = "3.1"

-[dev-dependencies.env_logger]
-version = "0.10.0"
-features = ["humantime"]
-default-features = false
+[dev-dependencies.error-support]
+path = "../support/error"
+features = ["testing"]

 [build-dependencies.uniffi]
 version = "0.29.0"
View File
@@ -78,7 +78,7 @@ impl MigrationLogic for TabsMigrationLogic {
     }

     fn init(&self, db: &Transaction<'_>) -> MigrationResult<()> {
-        log::debug!("Creating schemas");
+        error_support::debug!("Creating schemas");
         init_schema(db)?;
         Ok(())
     }
View File
@@ -17,6 +17,7 @@ use crate::schema;
 use crate::sync::record::TabsRecord;
 use crate::DeviceType;
 use crate::{PendingCommand, RemoteCommand, Timestamp};
+use error_support::{error, info, trace, warn};
 use rusqlite::{
     types::{FromSql, FromSqlError, FromSqlResult, ToSql, ToSqlOutput, ValueRef},
     Connection, OpenFlags,
@@ -96,7 +97,7 @@ impl TabsStorage {
         {
             if let Err(err) = conn.close() {
                 // Log the error, but continue with shutdown
-                log::error!("Failed to close the connection: {:?}", err);
+                error!("Failed to close the connection: {:?}", err);
             }
         }
     }
@@ -124,7 +125,7 @@ impl TabsStorage {
                 &crate::schema::TabsMigrationLogic,
             ) {
                 Ok(conn) => {
-                    log::info!("tabs storage is opening an existing database");
+                    info!("tabs storage is opening an existing database");
                     self.db_connection = DbConnection::Open(conn);
                     match self.db_connection {
                         DbConnection::Open(ref conn) => Ok(Some(conn)),
@@ -134,7 +135,7 @@ impl TabsStorage {
                 Err(open_database::Error::SqlError(rusqlite::Error::SqliteFailure(code, _)))
                     if code.code == rusqlite::ErrorCode::CannotOpen =>
                 {
-                    log::info!("tabs storage could not open an existing database and hasn't been asked to create one");
+                    info!("tabs storage could not open an existing database and hasn't been asked to create one");
                     Ok(None)
                 }
                 Err(e) => Err(e.into()),
@@ -157,7 +158,7 @@ impl TabsStorage {
             flags,
             &crate::schema::TabsMigrationLogic,
         )?;
-        log::info!("tabs storage is creating a database connection");
+        info!("tabs storage is creating a database connection");
         self.db_connection = DbConnection::Open(conn);
         match self.db_connection {
             DbConnection::Open(ref conn) => Ok(conn),
@@ -168,7 +169,7 @@ impl TabsStorage {
     pub fn update_local_state(&mut self, local_state: Vec<RemoteTab>) {
         let num_tabs = local_state.len();
         self.local_tabs.borrow_mut().replace(local_state);
-        log::info!("update_local_state has {num_tabs} tab entries");
+        info!("update_local_state has {num_tabs} tab entries");
     }

     // We try our best to fit as many tabs in a payload as possible, this includes
@@ -202,7 +203,7 @@ impl TabsStorage {
             // Sort the tabs so when we trim tabs it's the oldest tabs
             sanitized_tabs.sort_by(|a, b| b.last_used.cmp(&a.last_used));
             trim_tabs_length(&mut sanitized_tabs, MAX_PAYLOAD_SIZE);
-            log::info!(
+            info!(
                 "prepare_local_tabs_for_upload found {} tabs",
                 sanitized_tabs.len()
             );
@@ -210,7 +211,7 @@ impl TabsStorage {
         }

         // It's a less than ideal outcome if at startup (or any time) we are asked to
         // sync tabs before the app has told us what the tabs are, so make noise.
-        log::warn!("prepare_local_tabs_for_upload - have no local tabs");
+        warn!("prepare_local_tabs_for_upload - have no local tabs");
         None
     }
@@ -279,7 +280,7 @@ impl TabsStorage {
                 // so we really should consider just dropping it? (Sadly though, it does seem
                 // possible it's actually a very recently connected client, so we keep it)
                 // We should get rid of this eventually - https://github.com/mozilla/application-services/issues/5199
-                log::info!(
+                info!(
                     "Storing tabs from a client that doesn't appear in the devices list: {}",
                     id,
                 );
@@ -375,7 +376,7 @@ impl TabsStorage {
                 ":ttl": client_ttl_ms,
             },
         )?;
-        log::info!(
+        info!(
             "removed {} stale clients (threshold was {})",
             num_removed,
             last_sync - client_ttl_ms
@@ -401,7 +402,7 @@ impl TabsStorage {
         for remote_tab in new_remote_tabs {
             let record = &remote_tab.0;
             let last_modified = remote_tab.1;
-            log::info!(
+            info!(
                 "inserting tab for device {}, last modified at {}",
                 record.id,
                 last_modified.as_millis()
@@ -484,8 +485,8 @@ impl TabsStorage {
     ) -> Result<bool> {
         let connection = self.open_or_create()?;
         let RemoteCommand::CloseTab { url } = command;
-        log::info!("Adding remote command for {device_id} at {time_requested}");
-        log::trace!("command is {command:?}");
+        info!("Adding remote command for {device_id} at {time_requested}");
+        trace!("command is {command:?}");
         // tx maybe not needed for single write?
         let tx = connection.unchecked_transaction()?;
         let changes = tx.execute_cached(
@@ -510,7 +511,7 @@ impl TabsStorage {
     ) -> Result<bool> {
         let connection = self.open_or_create()?;
         let RemoteCommand::CloseTab { url } = command;
-        log::info!("removing remote tab close details: client={device_id}");
+        info!("removing remote tab close details: client={device_id}");
         let tx = connection.unchecked_transaction()?;
         let changes = tx.execute_cached(
             "DELETE FROM remote_tab_commands
@@ -547,9 +548,7 @@ impl TabsStorage {
                 let command = match row.get::<_, CommandKind>(1) {
                     Ok(c) => c,
                     Err(e) => {
-                        log::error!(
-                            "do_get_pending_commands: ignoring error fetching command: {e:?}"
-                        );
+                        error!("do_get_pending_commands: ignoring error fetching command: {e:?}");
                         return Ok(None);
                     }
                 };
@@ -577,8 +576,8 @@ impl TabsStorage {
     pub fn set_pending_command_sent(&mut self, command: &PendingCommand) -> Result<bool> {
         let connection = self.open_or_create()?;
         let RemoteCommand::CloseTab { url } = &command.command;
-        log::info!("setting remote tab sent: client={}", command.device_id);
-        log::trace!("command: {command:?}");
+        info!("setting remote tab sent: client={}", command.device_id);
+        trace!("command: {command:?}");
         let tx = connection.unchecked_transaction()?;
         let ts = Timestamp::now();
         let changes = tx.execute_cached(
@@ -657,7 +656,7 @@ impl TabsStorage {
             },
         )?;

-        log::info!(
+        info!(
             "deleted {} pending tab closures because they were not in the new tabs",
             conn.changes()
         );
@@ -671,7 +670,7 @@ impl TabsStorage {
         ) AND (SELECT last_modified FROM tabs WHERE guid = device_id) - time_requested >= {REMOTE_COMMAND_TTL_MS}
         ");
         tx.execute_cached(&sql, [])?;
-        log::info!("deleted {} records because they timed out", conn.changes());
+        info!("deleted {} records because they timed out", conn.changes());

         // Commit changes and clean up temp
         tx.commit()?;
@@ -780,7 +779,7 @@ mod tests {
     #[test]
     fn test_open_if_exists_no_file() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let dir = tempfile::tempdir().unwrap();
         let db_name = dir.path().join("test_open_for_read_no_file.db");
         let mut storage = TabsStorage::new(db_name.clone());
@@ -794,7 +793,7 @@ mod tests {
     #[test]
     fn test_tabs_meta() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let dir = tempfile::tempdir().unwrap();
         let db_name = dir.path().join("test_tabs_meta.db");
         let mut db = TabsStorage::new(db_name);
@@ -829,7 +828,7 @@ mod tests {
     #[test]
     fn test_prepare_local_tabs_for_upload() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let mut storage = TabsStorage::new_with_mem_path("test_prepare_local_tabs_for_upload");
         assert_eq!(storage.prepare_local_tabs_for_upload(), None);
         storage.update_local_state(vec![
@@ -888,7 +887,7 @@ mod tests {
     }
     #[test]
     fn test_trimming_tab_title() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let mut storage = TabsStorage::new_with_mem_path("test_prepare_local_tabs_for_upload");
         assert_eq!(storage.prepare_local_tabs_for_upload(), None);
         storage.update_local_state(vec![RemoteTab {
@@ -913,7 +912,7 @@ mod tests {
     }
     #[test]
     fn test_utf8_safe_title_trim() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let mut storage = TabsStorage::new_with_mem_path("test_prepare_local_tabs_for_upload");
         assert_eq!(storage.prepare_local_tabs_for_upload(), None);
         storage.update_local_state(vec![
@@ -957,7 +956,7 @@ mod tests {
     }
     #[test]
     fn test_trim_tabs_length() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let mut storage = TabsStorage::new_with_mem_path("test_prepare_local_tabs_for_upload");
         assert_eq!(storage.prepare_local_tabs_for_upload(), None);
let mut too_many_tabs: Vec<RemoteTab> = Vec::new(); let mut too_many_tabs: Vec<RemoteTab> = Vec::new();
@@ -986,7 +985,7 @@ mod tests {
} }
#[test] #[test]
fn test_remove_stale_clients() { fn test_remove_stale_clients() {
env_logger::try_init().ok(); error_support::init_for_tests();
let dir = tempfile::tempdir().unwrap(); let dir = tempfile::tempdir().unwrap();
let db_name = dir.path().join("test_remove_stale_clients.db"); let db_name = dir.path().join("test_remove_stale_clients.db");
let mut storage = TabsStorage::new(db_name); let mut storage = TabsStorage::new(db_name);
@@ -1063,7 +1062,7 @@ mod tests {
#[test] #[test]
fn test_add_pending_dupe_simple() { fn test_add_pending_dupe_simple() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = TabsStorage::new_with_mem_path("test_add_pending_dupe_simple"); let mut storage = TabsStorage::new_with_mem_path("test_add_pending_dupe_simple");
let command = RemoteCommand::close_tab("https://example1.com"); let command = RemoteCommand::close_tab("https://example1.com");
// returns a bool to say if it's new or not. // returns a bool to say if it's new or not.
@@ -1083,7 +1082,7 @@ mod tests {
#[test] #[test]
fn test_add_pending_remote_close() { fn test_add_pending_remote_close() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = TabsStorage::new_with_mem_path("test_add_pending_remote_close"); let mut storage = TabsStorage::new_with_mem_path("test_add_pending_remote_close");
storage.open_or_create().unwrap(); storage.open_or_create().unwrap();
assert!(storage.open_if_exists().unwrap().is_some()); assert!(storage.open_if_exists().unwrap().is_some());
@@ -1139,7 +1138,7 @@ mod tests {
#[test] #[test]
fn test_remote_tabs_filters_pending_closures() { fn test_remote_tabs_filters_pending_closures() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = let mut storage =
TabsStorage::new_with_mem_path("test_remote_tabs_filters_pending_closures"); TabsStorage::new_with_mem_path("test_remote_tabs_filters_pending_closures");
let records = vec![ let records = vec![
@@ -1251,7 +1250,7 @@ mod tests {
#[test] #[test]
fn test_remove_old_pending_closures_timed_removal() { fn test_remove_old_pending_closures_timed_removal() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = let mut storage =
TabsStorage::new_with_mem_path("test_remove_old_pending_closures_timed_removal"); TabsStorage::new_with_mem_path("test_remove_old_pending_closures_timed_removal");
@@ -1337,7 +1336,7 @@ mod tests {
} }
#[test] #[test]
fn test_remove_old_pending_closures_no_tab_removal() { fn test_remove_old_pending_closures_no_tab_removal() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = let mut storage =
TabsStorage::new_with_mem_path("test_remove_old_pending_closures_no_tab_removal"); TabsStorage::new_with_mem_path("test_remove_old_pending_closures_no_tab_removal");
let db = storage.open_if_exists().unwrap().unwrap(); let db = storage.open_if_exists().unwrap().unwrap();
@@ -1423,7 +1422,7 @@ mod tests {
#[test] #[test]
fn test_remove_pending_command() { fn test_remove_pending_command() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = TabsStorage::new_with_mem_path("test_remove_pending_command"); let mut storage = TabsStorage::new_with_mem_path("test_remove_pending_command");
storage.open_or_create().unwrap(); storage.open_or_create().unwrap();
assert!(storage.open_if_exists().unwrap().is_some()); assert!(storage.open_if_exists().unwrap().is_some());
@@ -1463,7 +1462,7 @@ mod tests {
#[test] #[test]
fn test_sent_command() { fn test_sent_command() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = TabsStorage::new_with_mem_path("test_sent_command"); let mut storage = TabsStorage::new_with_mem_path("test_sent_command");
let command = RemoteCommand::close_tab("https://example1.com"); let command = RemoteCommand::close_tab("https://example1.com");
storage storage
@@ -1496,7 +1495,7 @@ mod tests {
#[test] #[test]
fn test_remove_pending_closures_only_affects_target_device() { fn test_remove_pending_closures_only_affects_target_device() {
env_logger::try_init().ok(); error_support::init_for_tests();
let mut storage = let mut storage =
TabsStorage::new_with_mem_path("test_remove_pending_closures_target_device"); TabsStorage::new_with_mem_path("test_remove_pending_closures_target_device");
let now = Timestamp::now(); let now = Timestamp::now();
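The hunks above, and the bridge/engine hunks below, apply one mechanical change: the tabs crate now takes its logging macros and test-logging setup from error_support instead of using the log and env_logger crates directly. A minimal sketch of the resulting shape, relying on the `use error_support::{debug, info, trace, warn};` import shown in the next file, and assuming `init_for_tests()` is idempotent like the `env_logger::try_init().ok()` call it replaces:

    // Sketch only; mirrors the pattern in the hunks rather than any one function.
    use error_support::{info, trace};

    fn add_remote_command(device_id: &str, time_requested: i64) {
        // Previously log::info! / log::trace!; the call sites are otherwise unchanged.
        info!("Adding remote command for {device_id} at {time_requested}");
        trace!("time_requested is {time_requested:?}");
    }

    #[cfg(test)]
    mod tests {
        #[test]
        fn test_logging_setup() {
            // Replaces env_logger::try_init().ok(); safe to call from every test.
            error_support::init_for_tests();
        }
    }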


@@ -140,7 +140,7 @@ mod tests {
     // A copy of the normal "engine" tests but which go via the bridge
     #[test]
     fn test_sync_via_bridge() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let store = Arc::new(TabsStore::new_with_mem_path("test-bridge_incoming"));
@@ -282,7 +282,7 @@ mod tests {
     #[test]
     fn test_sync_meta() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let store = Arc::new(TabsStore::new_with_mem_path("test-meta"));
         let bridge = store.bridged_engine();


@@ -7,6 +7,8 @@ use crate::storage::{ClientRemoteTabs, RemoteTab, TABS_CLIENT_TTL};
 use crate::store::TabsStore;
 use crate::sync::record::{TabsRecord, TabsRecordTab};
 use anyhow::Result;
+use error_support::{debug, info, trace, warn};
 use std::collections::HashMap;
 use std::sync::{Arc, Mutex, RwLock, Weak};
 use sync15::bso::{IncomingBso, OutgoingBso, OutgoingEnvelope};
@@ -124,7 +126,7 @@ impl TabsEngine {
     pub fn set_last_sync(&self, last_sync: ServerTimestamp) -> Result<()> {
         let mut storage = self.store.storage.lock().unwrap();
-        log::debug!("Updating last sync to {}", last_sync);
+        debug!("Updating last sync to {}", last_sync);
         let last_sync_millis = last_sync.as_millis();
         Ok(storage.put_meta(schema::LAST_SYNC_META_KEY, &last_sync_millis)?)
     }
@@ -175,7 +177,7 @@ impl SyncEngine for TabsEngine {
             Some(record) => record,
             None => {
                 // Invalid record or a "tombstone" which tabs don't have.
-                log::warn!("Ignoring incoming invalid tab");
+                warn!("Ignoring incoming invalid tab");
                 incoming_telemetry.failed(1);
                 continue;
             }
@@ -231,7 +233,7 @@ impl SyncEngine for TabsEngine {
             last_modified: 0, // ignored for outgoing records.
             remote_tabs: local_tabs.to_vec(),
         };
-        log::trace!("outgoing {:?}", local_record);
+        trace!("outgoing {:?}", local_record);
         let envelope = OutgoingEnvelope {
             id: local_id.as_str().into(),
             ttl: Some(TABS_CLIENT_TTL),
@@ -248,7 +250,7 @@ impl SyncEngine for TabsEngine {
     }
     fn set_uploaded(&self, new_timestamp: ServerTimestamp, ids: Vec<Guid>) -> Result<()> {
-        log::info!("sync uploaded {} records", ids.len());
+        info!("sync uploaded {} records", ids.len());
         self.set_last_sync(new_timestamp)?;
         Ok(())
     }
@@ -330,7 +332,7 @@ pub mod test {
     #[test]
     fn test_incoming_tabs() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let engine = TabsEngine::new(Arc::new(TabsStore::new_with_mem_path("test-incoming")));
@@ -413,7 +415,7 @@ pub mod test {
     #[test]
     fn test_no_incoming_doesnt_write() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let engine = TabsEngine::new(Arc::new(TabsStore::new_with_mem_path(
             "test_no_incoming_doesnt_write",
@@ -487,7 +489,7 @@ pub mod test {
     #[test]
     fn test_apply_timestamp() {
-        env_logger::try_init().ok();
+        error_support::init_for_tests();
         let engine = TabsEngine::new(Arc::new(TabsStore::new_with_mem_path(
             "test-apply-timestamp",

third_party/rust/tinyvec/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"73712444deeadd5183a00d1bbf56b4fcd6396db0872020895f854631b6c6c309","Cargo.lock":"87e2a8a777227ec1bb50603f8bfbcc176c5a04dc6659d41b72e58cd56b843834","Cargo.toml":"d8ab8a7e7531a4ff6581880f1ab17beb32e2fb9c25772e68353da1c0cced01a5","LICENSE-APACHE.md":"3ddf9be5c28fe27dad143a5dc76eea25222ad1dd68934a047064e56ed2fa40c5","LICENSE-MIT.md":"4736b5d379253afa45744afc79ddb475912b213e939bdf7cb97d9a8b6f700e5f","LICENSE-ZLIB.md":"682b4c81b85e83ce6cc6e1ace38fdd97aeb4de0e972bd2b44aa0916c54af8c96","README.md":"0d8e58e1e52ebafe3f9a4a580d1b1f795c06b79aedad1197387205ef1590a173","benches/macros.rs":"b92679a63e1d39fd949c061fa97dfba933f3e1a6e6722e710cfe4fbfd6315ba9","benches/smallvec.rs":"3b86c05238638d9a894eaecd5a4e9875e2d1b3580cd880869f32a9c0ddab5576","debug_metadata/README.md":"ad8c829e14e40bf784808e7885009b1c016e94f0d578955df86efd245ce90d5e","debug_metadata/tinyvec.natvis":"bbddce08aacef999928071124cc0a8da7c66da4d45e1e79c69f5b617e704cce7","rustfmt.toml":"8a55552a7ab9bcd89add7014f0aeb9f80b491ddacc5f67f6988940f5d46c82b0","src/array.rs":"ab9e9abca8a76b1bec9e55f83f64c9db54980780a2dc88c13868f450caef5d0e","src/array/const_generic_impl.rs":"d0f8ae529b5c9f92da0aa67d5ae24a6e7fc3287ce6d3e576794455de44502a4c","src/array/generated_impl.rs":"8c94c274aae0909b7de75beff4215af8dfc8635cce0c09d75eb57e577c3cca68","src/array/generic_array_impl.rs":"cd8fd8f940e07badd47ab1d80b38e112f5ec09c559155e8857b6d95a85a06ead","src/arrayvec.rs":"bb3d6db2ea7bb362a379d169cc4423679f4a860a1c0744940b4ff653a9d8a372","src/arrayvec_drain.rs":"384738b646a766021886f7ccc32e7f42d5283c10b38b8c438ab049ca1139b0b8","src/lib.rs":"eef8d98214da0f56c1251411dd518e83abb72bb7dee6af5a71b5ab365afdaf68","src/slicevec.rs":"66b8fac4d3856378e3ad81ea9f97440a785f3659c923182435b80e8a7b0b9cbb","src/tinyvec.rs":"ad58bd2e1cdad8616fa96ebf84150c6fd3b3062f93c446347bde7e51ee4d18f0","tests/arrayvec.rs":"873c6a179ccc07ebd424028896483cd6661baac563447ff84cbf97cb100b60d5","tests/debugger_visualizer.rs":"27ffba7de85967678c9483c6e77a35c895c0cb8f594d188ccab34116d518af32","tests/tinyvec.rs":"4bdbe191ebe71c565d0b8cdf4177240016e1caf58b56f0ff5e5777a0028d09ef"},"package":"09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71"}

third_party/rust/tinyvec/CHANGELOG.md vendored Normal file

@@ -0,0 +1,104 @@
# Changelog
## 1.9
* Adds a `latest_stable_rust` cargo feature, which will automatically pull in
other cargo features related to the latest Stable version of rust.
* Adds `ArrayVec::try_from_array_len`
* Adds `TinyVec::into_vec` and `TinyVec::into_boxed_slice`
* Adds support for `generic-array` crate
* Adds support for the `borsh` crate
## 1.8.1
* [e00E](https://github.com/e00E) updated the rustc features so that they all
correctly depend on the lower version feature.
[pr 199](https://github.com/Lokathor/tinyvec/pull/199)
## 1.8
* [Fuuzetsu](https://github.com/Fuuzetsu) added the `ArrayVec::as_inner` method.
[pr 197](https://github.com/Lokathor/tinyvec/pull/197)
## 1.7
* [Fuuzetsu](https://github.com/Fuuzetsu) added the `rustc_1_61` cargo feature, which adds the `retain_mut` method.
[pr 198](https://github.com/Lokathor/tinyvec/pull/198)
## 1.6.1
* [e00E](https://github.com/e00E) fixed the Arbitrary impl to work on Stable
without using a feature gate.
[pr 180](https://github.com/Lokathor/tinyvec/pull/180)
## 1.6.0
* [i509VCB](https://github.com/i509VCB) added the `try_` functions for fallible reallocation.
[pr 158](https://github.com/Lokathor/tinyvec/pull/158)
* [ajtribick](https://github.com/ajtribick) added more error impls to `TryFromSliceError`.
[pr 160](https://github.com/Lokathor/tinyvec/pull/160)
* The `std` feature now automatically enables the `alloc` feature as well.
## 1.5.1
* [madsmtm](https://github.com/madsmtm) fixed an error with the `alloc` feature on very old rustc versions.
[pr 154](https://github.com/Lokathor/tinyvec/pull/154)
## 1.5.0
* [eeeebbbbrrrr](https://github.com/eeeebbbbrrrr) added an impl for [std::io::Write](https://doc.rust-lang.org/std/io/trait.Write.html) to `TinyVec` when the element type is `u8`.
This is gated behind the new `std` feature.
[pr 152](https://github.com/Lokathor/tinyvec/pull/152)
## 1.4.0
* [saethlin](https://github.com/saethlin) stabilized the usage of const generics and array map with the `rustc_1_55` feature.
[pr 149](https://github.com/Lokathor/tinyvec/pull/149)
## 1.3.1
* Improved the performance of the `clone_from` method [pr 144](https://github.com/Lokathor/tinyvec/pull/144)
## 1.3.0
* [jeffa5](https://github.com/jeffa5) added arbitrary implementations for `TinyVec` and `ArrayVec` [pr 146](https://github.com/Lokathor/tinyvec/pull/146).
* [elomatreb](https://github.com/elomatreb) implemented `DoubleEndedIterator` for `TinyVecIterator` [pr 145](https://github.com/Lokathor/tinyvec/pull/145).
## 1.2.0
* [Cryptjar](https://github.com/Cryptjar) removed the `A:Array` bound on the struct of `ArrayVec<A:Array>`,
and added the `from_array_empty` method, which is a `const fn` constructor
[pr 141](https://github.com/Lokathor/tinyvec/pull/141).
## 1.1.1
* [saethlin](https://github.com/saethlin) contributed many PRs (
[127](https://github.com/Lokathor/tinyvec/pull/127),
[128](https://github.com/Lokathor/tinyvec/pull/128),
[129](https://github.com/Lokathor/tinyvec/pull/129),
[131](https://github.com/Lokathor/tinyvec/pull/131),
[132](https://github.com/Lokathor/tinyvec/pull/132)
) to help in several benchmarks.
## 1.1.0
* [slightlyoutofphase](https://github.com/slightlyoutofphase)
added "array splat" style syntax to the `array_vec!` and `tiny_vec!` macros.
You can now write `array_vec![true; 5]` and get a length 5 array vec full of `true`,
just like normal array initialization allows. Same goes for `tiny_vec!`.
([pr 118](https://github.com/Lokathor/tinyvec/pull/118))
* [not-a-seagull](https://github.com/not-a-seagull)
added `ArrayVec::into_inner` so that you can get the array out of an `ArrayVec`.
([pr 124](https://github.com/Lokathor/tinyvec/pull/124))
## 1.0.2
* Added license files for the MIT and Apache-2.0 license options.
## 1.0.1
* Display additional features in the [docs.rs/tinyvec](https://docs.rs/tinyvec) documentation.
## 1.0.0
Initial Stable Release.

third_party/rust/tinyvec/Cargo.lock generated vendored Normal file

@@ -0,0 +1,647 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "aho-corasick"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
[[package]]
name = "arbitrary"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110"
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "borsh"
version = "1.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5430e3be710b68d984d1391c854eb431a9d548640711faa54eecb1df93db91cc"
dependencies = [
"cfg_aliases",
]
[[package]]
name = "bumpalo"
version = "3.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"bitflags",
"textwrap",
"unicode-width",
]
[[package]]
name = "criterion"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f"
dependencies = [
"atty",
"cast",
"clap",
"criterion-plot",
"csv",
"itertools",
"lazy_static",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_cbor",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2673cc8207403546f45f5fd319a974b1e6983ad1a3ee7e6041650013be041876"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
dependencies = [
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
dependencies = [
"crossbeam-utils",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
[[package]]
name = "csv"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
dependencies = [
"csv-core",
"itoa",
"ryu",
"serde",
]
[[package]]
name = "csv-core"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
dependencies = [
"memchr",
]
[[package]]
name = "debugger_test"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d95bb55f592fbb86947bee426d831de84bd65602a54f5cdcb10bfa70a62e52a0"
dependencies = [
"anyhow",
"log",
"quote",
"syn 1.0.109",
]
[[package]]
name = "debugger_test_parser"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebe536452a777752b9316f0c840afbb94a2411684d4f15c081449ea801ef9e75"
dependencies = [
"anyhow",
"log",
"regex",
]
[[package]]
name = "either"
version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
[[package]]
name = "generic-array"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8c8444bc9d71b935156cc0ccab7f622180808af7867b1daae6547d773591703"
dependencies = [
"typenum",
]
[[package]]
name = "half"
version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
[[package]]
name = "js-sys"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d"
dependencies = [
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.153"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
[[package]]
name = "log"
version = "0.4.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
[[package]]
name = "memchr"
version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
[[package]]
name = "num-traits"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
dependencies = [
"autocfg",
]
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "plotters"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609"
[[package]]
name = "plotters-svg"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab"
dependencies = [
"plotters-backend",
]
[[package]]
name = "proc-macro2"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rayon"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd"
dependencies = [
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
dependencies = [
"crossbeam-deque",
"crossbeam-utils",
]
[[package]]
name = "regex"
version = "1.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "ryu"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "serde"
version = "1.0.197"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
"half",
"serde",
]
[[package]]
name = "serde_derive"
version = "1.0.197"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "serde_json"
version = "1.0.114"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "serde_test"
version = "1.0.176"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a2f49ace1498612d14f7e0b8245519584db8299541dfe31a06374a828d620ab"
dependencies = [
"serde",
]
[[package]]
name = "smallvec"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.53"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "tinyvec"
version = "1.9.0"
dependencies = [
"arbitrary",
"borsh",
"criterion",
"debugger_test",
"debugger_test_parser",
"generic-array",
"serde",
"serde_test",
"smallvec",
"tinyvec_macros",
]
[[package]]
name = "tinyvec_macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "typenum"
version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "unicode-width"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
dependencies = [
"cfg-if",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
"syn 2.0.53",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
[[package]]
name = "web-sys"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

third_party/rust/tinyvec/Cargo.toml vendored Normal file

@@ -0,0 +1,158 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "tinyvec"
version = "1.9.0"
authors = ["Lokathor <zefria@gmail.com>"]
build = false
exclude = [
"/.github",
"/*.py",
"/*.sh",
"/src-backup",
]
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "`tinyvec` provides 100% safe vec-like data structures."
readme = "README.md"
keywords = [
"vec",
"no_std",
"no-std",
]
categories = [
"data-structures",
"no-std",
]
license = "Zlib OR Apache-2.0 OR MIT"
repository = "https://github.com/Lokathor/tinyvec"
[package.metadata.docs.rs]
features = [
"alloc",
"std",
"grab_spare_slice",
"latest_stable_rust",
"serde",
"borsh",
]
rustdoc-args = [
"--cfg",
"docs_rs",
]
[package.metadata.playground]
features = [
"alloc",
"std",
"grab_spare_slice",
"latest_stable_rust",
"serde",
"borsh",
]
[features]
alloc = ["tinyvec_macros"]
debugger_visualizer = []
default = []
experimental_write_impl = []
grab_spare_slice = []
latest_stable_rust = ["rustc_1_61"]
nightly_slice_partition_dedup = []
real_blackbox = ["criterion/real_blackbox"]
rustc_1_40 = []
rustc_1_55 = ["rustc_1_40"]
rustc_1_57 = ["rustc_1_55"]
rustc_1_61 = ["rustc_1_57"]
std = ["alloc"]
[lib]
name = "tinyvec"
path = "src/lib.rs"
[[test]]
name = "arrayvec"
path = "tests/arrayvec.rs"
[[test]]
name = "debugger_visualizer"
path = "tests/debugger_visualizer.rs"
test = false
required-features = ["debugger_visualizer"]
[[test]]
name = "tinyvec"
path = "tests/tinyvec.rs"
required-features = [
"alloc",
"std",
]
[[bench]]
name = "macros"
path = "benches/macros.rs"
harness = false
required-features = ["alloc"]
[[bench]]
name = "smallvec"
path = "benches/smallvec.rs"
harness = false
required-features = [
"alloc",
"real_blackbox",
]
[dependencies.arbitrary]
version = "1"
optional = true
[dependencies.borsh]
version = "1.2.0"
optional = true
default-features = false
[dependencies.generic-array]
version = "1.1.1"
optional = true
default-features = false
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
[dependencies.tinyvec_macros]
version = "0.1"
optional = true
[dev-dependencies.criterion]
version = "0.3.0"
[dev-dependencies.debugger_test]
version = "0.1"
[dev-dependencies.debugger_test_parser]
version = "0.1"
[dev-dependencies.serde_test]
version = "1.0"
[dev-dependencies.smallvec]
version = "1"
[profile.bench]
debug = 2
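Note the chain in the `[features]` table above: `latest_stable_rust` enables `rustc_1_61`, which transitively enables `rustc_1_57`, `rustc_1_55`, and `rustc_1_40` (the dependency ordering the 1.8.1 changelog entry describes), and `std` likewise implies `alloc`.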

third_party/rust/tinyvec/LICENSE-APACHE.md vendored Normal file

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/tinyvec/LICENSE-MIT.md vendored Normal file

@@ -0,0 +1,5 @@
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

third_party/rust/tinyvec/LICENSE-ZLIB.md vendored Normal file

@@ -0,0 +1,11 @@
Copyright (c) 2019 Daniel "Lokathor" Gee.
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

third_party/rust/tinyvec/README.md vendored Normal file

@@ -0,0 +1,34 @@
[![License:Zlib](https://img.shields.io/badge/License-Zlib-brightgreen.svg)](https://opensource.org/licenses/Zlib)
![Minimum Rust Version](https://img.shields.io/badge/Min%20Rust-1.47-green.svg)
[![crates.io](https://img.shields.io/crates/v/tinyvec.svg)](https://crates.io/crates/tinyvec)
[![docs.rs](https://docs.rs/tinyvec/badge.svg)](https://docs.rs/tinyvec/)
![Unsafe-Zero-Percent](https://img.shields.io/badge/Unsafety-0%25-brightgreen.svg)
# tinyvec
A 100% safe crate of vec-like types.
Not just safe at the public API boundary, fully safe for all internal code too: `#![forbid(unsafe_code)]`
The provided types are as follows:
* `ArrayVec` is an array-backed vec-like data structure. It panics on overflow.
* `SliceVec` is similar, but using a `&mut [T]` as the data backing.
* `TinyVec` (`alloc` feature) is an enum that's either an `Inline(ArrayVec)` or a `Heap(Vec)`.
If a `TinyVec` is `Inline` and would overflow its array it automatically transitions to `Heap` and continues whatever it was doing.
To attain this "100% safe code" status there is one compromise: the element type of the vecs must implement `Default`.
For more API details, please see [the docs.rs documentation](https://docs.rs/tinyvec/)
## `tinyvec` Alternatives?
Maybe you don't want to use `tinyvec`; there are other crates you might use instead!
* [arrayvec](https://docs.rs/arrayvec) is a crate with array-backed structures.
* [smallvec](https://docs.rs/smallvec) is a crate where the array-backed data can be moved to the heap on overflow.
The main difference is that both of those crates use `unsafe` code.
This mostly allows them to get rid of the `Default` limitation for elements that `tinyvec` imposes.
The `smallvec` and `arrayvec` crates are generally correct, but there have been occasional bugs leading to UB.
With `tinyvec`, any uncaught bugs *can't* lead to UB, because the crate is safe code all the way through.
If you want that absolute level of assurance against UB, use `tinyvec`.
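A quick illustration of the inline-to-heap transition the README describes (a sketch, not part of the vendored file; assumes the `alloc` feature is enabled and uses the same `tiny_vec!` syntax as the crate's benches below):

    use tinyvec::{tiny_vec, TinyVec};

    fn main() {
        // Starts out Inline, backed by a [u8; 4] array.
        let mut v: TinyVec<[u8; 4]> = tiny_vec!([u8; 4] => 1, 2, 3, 4);
        assert!(matches!(v, TinyVec::Inline(_)));

        // Pushing past the array's capacity transitions the storage to a
        // heap-allocated Vec, and the value keeps working as before.
        v.push(5);
        assert!(matches!(v, TinyVec::Heap(_)));

        // The stated compromise for 100% safe code: element types must
        // implement Default (u8 does).
        assert_eq!(v.len(), 5);
    }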

third_party/rust/tinyvec/benches/macros.rs vendored Normal file

@@ -0,0 +1,52 @@
use criterion::{criterion_group, criterion_main, Criterion};
use tinyvec::tiny_vec;
fn bench_tinyvec_macro(c: &mut Criterion) {
let mut g = c.benchmark_group("tinyvec_macro");
g.bench_function("0 of 32", |b| {
b.iter(|| tiny_vec!([u8; 32]));
});
g.bench_function("16 of 32", |b| {
b.iter(|| {
tiny_vec!([u8; 32]=>
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
)
});
});
g.bench_function("32 of 32", |b| {
b.iter(|| {
tiny_vec!([u8; 32]=>
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
)
});
});
g.bench_function("33 of 32", |b| {
b.iter(|| {
tiny_vec!([u8; 32]=>
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33,
)
});
});
g.bench_function("64 of 32", |b| {
b.iter(|| {
tiny_vec!([u8; 32]=>
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
)
});
});
}
criterion_group!(benches, bench_tinyvec_macro);
criterion_main!(benches);
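Per the Cargo.toml above, this bench declares `harness = false` and `required-features = ["alloc"]`, so it would typically be run with something like `cargo bench --features alloc` from the crate root.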

third_party/rust/tinyvec/benches/smallvec.rs vendored Normal file

@@ -0,0 +1,503 @@
//! Benchmarks that compare TinyVec to SmallVec
//!
//! All the following commentary is based on the latest nightly at the time:
//! rustc 1.55.0 (c8dfcfe04 2021-09-06).
//!
//! Some of these benchmarks are just a few instructions, so we put our own for
//! loop inside the criterion::Bencher::iter call. This seems to improve the
//! stability of measurements, and it has the wonderful side effect of making
//! the emitted assembly easier to follow. Some of these benchmarks are totally
//! inlined so that there are no calls at all in the hot path, so finding
//! this for loop is an easy way to find your way around the emitted assembly.
//!
//! The clear method is cheaper to call for arrays of elements without a Drop
//! impl, so wherever possible we reuse a single object in the benchmark loop,
//! with a clear + black_box on each iteration in an attempt to not make that
//! visible to the optimizer.
//!
//! We always call black_box(&v), instead of v = black_box(v) because the latter
//! does a move of the inline array, which is linear in the size of the array
//! and thus varies based on the array type being benchmarked, and this move can
//! be more expensive than the function we're trying to benchmark.
//!
//! We also black_box the input to each method call. This has a significant
//! effect on the assembly emitted, for example if we do not black_box the range
//! we iterate over in the ::push benchmarks, the loop is unrolled. It's not
//! entirely clear if it's better to black_box the iterator that yields the
//! items being pushed, or to black_box at a deeper level: v.push(black_box(i))
//! for example. Anecdotally, it seems like the latter approach produces
//! unreasonably bad assembly.
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use smallvec::SmallVec;
use std::iter::FromIterator;
use tinyvec::TinyVec;
const ITERS: usize = 10_000;
macro_rules! tinyvec_benches {
($c:expr, $type:ty ; $len:expr) => {{
let mut g = $c.benchmark_group(concat!(
"TinyVec_",
stringify!($type),
"_",
stringify!($len)
));
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::default"
),
|b| {
b.iter(|| {
for _ in 0..ITERS {
let v: TinyVec<[$type; $len]> = TinyVec::default();
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::clone"
),
|b| {
b.iter(|| {
let outer: TinyVec<[$type; $len]> =
black_box(TinyVec::from_iter(0..=($len as usize - 1) as _));
for _ in 0..ITERS {
let v = outer.clone();
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::clear"
),
|b| {
b.iter(|| {
let mut v: TinyVec<[$type; $len]> = TinyVec::default();
for _ in 0..ITERS {
v.clear();
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::push"
),
|b| {
b.iter(|| {
let mut v: TinyVec<[$type; $len]> = TinyVec::default();
for _ in 0..ITERS {
v.clear();
black_box(&v);
for i in black_box(0..=($len as usize - 1) as _) {
v.push(i);
}
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::from_iter"
),
|b| {
b.iter(|| {
for _ in 0..ITERS {
let v: TinyVec<[$type; $len]> =
TinyVec::from_iter(black_box(0..=($len as usize - 1) as _));
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::from_slice"
),
|b| {
b.iter(|| {
let data: &[$type] = &[0, 1, 2, 3, 4, 5, 6, 7];
for _ in 0..ITERS {
let v: TinyVec<[$type; $len]> = TinyVec::from(black_box(data));
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::extend"
),
|b| {
b.iter(|| {
let mut v: TinyVec<[$type; $len]> = black_box(TinyVec::default());
for _ in 0..ITERS {
v.clear();
black_box(&v);
v.extend(black_box(0..=($len as usize - 1) as _));
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::extend_from_slice"
),
|b| {
b.iter(|| {
let data: &[$type] = black_box(&[0, 1, 2, 3, 4, 5, 6, 7]);
let mut v: TinyVec<[$type; $len]> = black_box(TinyVec::default());
for _ in 0..ITERS {
v.clear();
black_box(&v);
v.extend_from_slice(data);
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::insert"
),
|b| {
b.iter(|| {
let mut v: TinyVec<[$type; $len]> = TinyVec::default();
for _ in 0..ITERS {
v.clear();
black_box(&v);
for i in black_box(0..=($len as usize - 1) as _) {
v.insert(i as usize, i);
}
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"TinyVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::remove"
),
|b| {
b.iter(|| {
let outer: TinyVec<[$type; $len]> =
black_box(TinyVec::from_iter(0..=($len as usize - 1) as _));
for _ in 0..ITERS {
let mut v = outer.clone();
for i in black_box((0..=($len as usize - 1) as _).rev()) {
v.remove(i);
}
black_box(&v);
}
});
},
);
}};
}
fn tinyvec_benches(c: &mut Criterion) {
tinyvec_benches!(c, u8; 8);
tinyvec_benches!(c, u8; 16);
tinyvec_benches!(c, u8; 32);
tinyvec_benches!(c, u8; 64);
tinyvec_benches!(c, u8; 128);
tinyvec_benches!(c, u8; 256);
tinyvec_benches!(c, u64; 2);
tinyvec_benches!(c, u64; 4);
tinyvec_benches!(c, u64; 8);
tinyvec_benches!(c, u64; 16);
tinyvec_benches!(c, u64; 32);
}
macro_rules! smallvec_benches {
($c:expr, $type:ty ; $len:expr) => {{
let mut g = $c.benchmark_group(concat!(
"SmallVec_",
stringify!($type),
"_",
stringify!($len)
));
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::default"
),
|b| {
b.iter(|| {
for _ in 0..ITERS {
let v: SmallVec<[$type; $len]> = SmallVec::default();
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::clone"
),
|b| {
b.iter(|| {
let outer: SmallVec<[$type; $len]> =
black_box(SmallVec::from_iter(0..=($len as usize - 1) as _));
for _ in 0..ITERS {
let v = outer.clone();
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::clear"
),
|b| {
b.iter(|| {
let mut v: SmallVec<[$type; $len]> = SmallVec::default();
for _ in 0..ITERS {
v.clear();
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::push"
),
|b| {
b.iter(|| {
let mut v: SmallVec<[$type; $len]> = SmallVec::default();
for _ in 0..ITERS {
v.clear();
black_box(&v);
for i in black_box(0..=($len as usize - 1) as _) {
v.push(i);
}
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::from_iter"
),
|b| {
b.iter(|| {
for _ in 0..ITERS {
let v: SmallVec<[$type; $len]> =
SmallVec::from_iter(black_box(0..=($len as usize - 1) as _));
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::from_slice"
),
|b| {
b.iter(|| {
let data: &[$type] = &[0, 1, 2, 3, 4, 5, 6, 7];
for _ in 0..ITERS {
let v: SmallVec<[$type; $len]> = SmallVec::from(black_box(data));
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::extend"
),
|b| {
b.iter(|| {
let mut v: SmallVec<[$type; $len]> = black_box(SmallVec::default());
for _ in 0..ITERS {
v.clear();
black_box(&v);
v.extend(black_box(0..=($len as usize - 1) as _));
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::extend_from_slice"
),
|b| {
b.iter(|| {
let data: &[$type] = black_box(&[0, 1, 2, 3, 4, 5, 6, 7]);
let mut v: SmallVec<[$type; $len]> = black_box(SmallVec::default());
for _ in 0..ITERS {
v.clear();
black_box(&v);
v.extend_from_slice(data);
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::insert"
),
|b| {
b.iter(|| {
let mut v: SmallVec<[$type; $len]> = SmallVec::default();
for _ in 0..ITERS {
v.clear();
black_box(&v);
for i in black_box(0..=($len as usize - 1) as _) {
v.insert(i as usize, i);
}
black_box(&v);
}
});
},
);
g.bench_function(
concat!(
"SmallVec<[",
stringify!($type),
"; ",
stringify!($len),
"]>::remove"
),
|b| {
b.iter(|| {
let outer: SmallVec<[$type; $len]> =
black_box(SmallVec::from_iter(0..=($len as usize - 1) as _));
for _ in 0..ITERS {
let mut v = outer.clone();
for i in black_box((0..=($len as usize - 1) as _).rev()) {
v.remove(i);
}
black_box(&v);
}
});
},
);
}};
}
fn smallvec_benches(c: &mut Criterion) {
smallvec_benches!(c, u8; 8);
smallvec_benches!(c, u8; 16);
smallvec_benches!(c, u8; 32);
smallvec_benches!(c, u8; 64);
smallvec_benches!(c, u8; 128);
smallvec_benches!(c, u8; 256);
smallvec_benches!(c, u64; 2);
smallvec_benches!(c, u64; 4);
smallvec_benches!(c, u64; 8);
smallvec_benches!(c, u64; 16);
smallvec_benches!(c, u64; 32);
}
criterion_group!(benches, tinyvec_benches, smallvec_benches);
criterion_main!(benches);
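The methodology in this file's header comments is independent of tinyvec itself. As a minimal sketch, assuming nothing beyond the `criterion` and `tinyvec` crates used above, this isolates the two patterns the comments describe: reusing one vector with `clear`, and taking `black_box(&v)` rather than `v = black_box(v)`:

```rust
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use tinyvec::TinyVec;

fn bench_blackbox_pattern(c: &mut Criterion) {
  c.bench_function("push_u8_32_reuse", |b| {
    b.iter(|| {
      // Reuse one vector across the inner loop; `clear` is cheap for
      // elements without a Drop impl, so the loop measures `push`.
      let mut v: TinyVec<[u8; 32]> = TinyVec::default();
      for _ in 0..1_000 {
        v.clear();
        // Taking a reference defeats dead-store elimination without
        // moving the inline array, which `v = black_box(v)` would do
        // at a cost linear in the inline capacity.
        black_box(&v);
        for i in black_box(0u8..32) {
          v.push(i);
        }
        black_box(&v);
      }
    });
  });
}

criterion_group!(sketch, bench_blackbox_pattern);
criterion_main!(sketch);
```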


@@ -0,0 +1,111 @@
## Debugger Visualizers
Many languages and debuggers enable developers to control how a type is
displayed in a debugger. These are called "debugger visualizations" or "debugger
views".
The Windows debuggers (WinDbg/CDB) support defining custom debugger visualizations using
the `Natvis` framework. To use Natvis, developers write XML documents with the `.natvis`
extension, following the Natvis schema, that describe how types should be displayed in the
debugger.
(See: https://docs.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects?view=vs-2019)
The Natvis files provide patterns, which match type names, and a description of how to
display those types.
The Natvis schema can be found either online (See: https://code.visualstudio.com/docs/cpp/natvis#_schema)
or locally at `<VS Installation Folder>\Xml\Schemas\1033\natvis.xsd`.
The GNU debugger (GDB) supports defining custom debugger views using pretty printers.
Pretty printers are written as Python scripts that describe how a type should be displayed
when loaded in GDB/LLDB. (See: https://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html#Pretty-Printing)
The pretty printers provide patterns, which match type names, and, for matching types,
describe how to display those types. (For writing a pretty printer, see: https://sourceware.org/gdb/onlinedocs/gdb/Writing-a-Pretty_002dPrinter.html#Writing-a-Pretty_002dPrinter.)
### Embedding Visualizers
Through the currently unstable `#[debugger_visualizer]` attribute, the `tinyvec`
crate can embed debugger visualizers into the crate metadata.
Currently, the two supported visualizer types are Natvis and pretty printers.
For Natvis files, when an executable is linked with a crate that includes Natvis files,
the MSVC linker embeds the contents of all Natvis files into the generated `PDB`.
For pretty printers, the compiler encodes the contents of the pretty printer
in the `.debug_gdb_scripts` section of the generated `ELF`.
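As a rough sketch of what embedding looks like at a crate root, assuming the unstable attribute and feature-gate names current when this README was written (the GDB script path is purely illustrative; per the Testing Locally section below, only a Natvis file exists in this crate):

```rust
// lib.rs — nightly-only while `debugger_visualizer` remains unstable.
#![feature(debugger_visualizer)]
// Embedded into the PDB by the MSVC linker when this crate is linked.
#![debugger_visualizer(natvis_file = "../debug_metadata/tinyvec.natvis")]
// Hypothetical: would be encoded into `.debug_gdb_scripts` in the ELF.
#![debugger_visualizer(gdb_script_file = "../debug_metadata/pretty_printer.py")]
```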
### Testing Visualizers
The `tinyvec` crate supports testing the debugger visualizers defined for this crate. The entry point for
these tests is `tests/debugger_visualizer.rs`. These tests are defined using the `debugger_test` and
`debugger_test_parser` crates. The `debugger_test` crate is a proc macro crate which defines a
single proc macro attribute, `#[debugger_test]`. For more detailed information about this crate,
see https://crates.io/crates/debugger_test. The CI pipeline for the `tinyvec` crate has been updated
to run the debugger visualizer tests to ensure debugger visualizers do not become broken/stale.
The `#[debugger_test]` proc macro attribute may only be used on test functions and will run the
function under the debugger specified by the `debugger` meta item.
This proc macro attribute has three required meta items:
1. `debugger` takes a string value which specifies the debugger to launch.
2. `commands` takes a newline (`\n`) separated list of debugger commands to run.
3. `expected_statements` takes a newline (`\n`) separated list of statements that must appear in the
debugger output. Pattern matching through regular expressions is also supported by using the
`pattern:` prefix on an expected statement.
#### Example:
```rust
#[debugger_test(
debugger = "cdb",
commands = "command1\ncommand2\ncommand3",
expected_statements = "statement1\nstatement2\nstatement3")]
fn test() {
}
```
Using a multiline string is also supported, with a single debugger command/expected statement per line:
```rust
#[debugger_test(
debugger = "cdb",
commands = "
command1
command2
command3",
expected_statements = "
statement1
pattern:statement[0-9]+
statement3")]
fn test() {
}
```
In the example above, the second expected statement uses pattern matching through a regular expression
by using the `pattern:` prefix.
#### Testing Locally
Currently, only Natvis visualizations have been defined for the `tinyvec` crate via `debug_metadata/tinyvec.natvis`,
which means the `tests/debugger_visualizer.rs` tests need to be run on Windows using the `*-pc-windows-msvc` targets.
To run these tests locally, first ensure the Debugging Tools for Windows are installed, or install them by
following the steps in [Debugging Tools for Windows](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/).
Once the debugging tools are installed, the tests can be run in the same manner as in the CI pipeline.
#### Note
The debugger visualizer tests in `tests/debugger_visualizer.rs` must run consecutively, not in parallel.
This can be achieved by passing `--test-threads=1` to the test binary (the flag goes to the libtest
harness, not to rustc). This is due to how the debugger tests are run: each test marked with the
`#[debugger_test]` attribute launches a debugger and attaches it to the current test process. If tests
run in parallel, a test may try to attach a debugger to a process that already has one attached, causing
the test to fail.
For example:
```
cargo test --test debugger_visualizer --features debugger_visualizer -- --test-threads=1
```


@@ -0,0 +1,24 @@
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<Type Name="tinyvec::arrayvec::ArrayVec&lt;array$&lt;*,*&gt;&gt;">
<DisplayString>{{ len={len} }}</DisplayString>
<Expand>
<Item Name="[len]">len</Item>
<Item Name="[capacity]">$T2</Item>
<ArrayItems>
<Size>len</Size>
<ValuePointer>($T1*)data</ValuePointer>
</ArrayItems>
</Expand>
</Type>
<Type Name="tinyvec::slicevec::SliceVec&lt;*&gt;">
<DisplayString>{{ len={len} }}</DisplayString>
<Expand>
<Item Name="[len]">len</Item>
<ArrayItems>
<Size>len</Size>
<ValuePointer>data.data_ptr</ValuePointer>
</ArrayItems>
</Expand>
</Type>
</AutoVisualizer>
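To see these visualizers in action, a tiny program like the following sketch can be debugged under WinDbg/CDB with the natvis loaded; the rendering noted in the comments follows from the `DisplayString` and `Expand` rules above:

```rust
use tinyvec::ArrayVec;

fn main() {
  let mut v: ArrayVec<[i32; 4]> = ArrayVec::new();
  v.push(10);
  v.push(20);
  // With the natvis above, `v` should render as "{ len=2 }" and expand
  // to [len] = 2, [capacity] = 4, plus the two array items.
  println!("{:?}", v);
}
```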

third_party/rust/tinyvec/rustfmt.toml

@@ -0,0 +1,14 @@
# Stable
edition = "2018"
fn_params_layout = "Compressed"
max_width = 80
tab_spaces = 2
use_field_init_shorthand = true
use_try_shorthand = true
use_small_heuristics = "Max"
# Unstable
format_code_in_doc_comments = true
wrap_comments = true
imports_granularity = "Crate"

third_party/rust/tinyvec/src/array.rs

@@ -0,0 +1,54 @@
/// A trait for types that are an array.
///
/// An "array", for our purposes, has the following properties:
/// * Owns some number of elements.
/// * The element type can be generic, but must implement [`Default`].
/// * The capacity is fixed at compile time, based on the implementing type.
/// * You can get a shared or mutable slice to the elements.
///
/// You are generally **not** expected to need to implement this yourself. It is
/// already implemented for all the major array lengths (`0..=32` and the powers
/// of 2 up to 4,096), or for all array lengths with the feature `rustc_1_55`.
///
/// **Additional lengths can easily be added upon request.**
///
/// ## Safety Reminder
///
/// Just a reminder: this trait is 100% safe, which means that `unsafe` code
/// **must not** rely on an instance of this trait being correct.
pub trait Array {
/// The type of the items in the thing.
type Item: Default;
/// The number of slots in the thing.
const CAPACITY: usize;
/// Gives a shared slice over the whole thing.
///
/// A correct implementation will return a slice with a length equal to the
/// `CAPACITY` value.
fn as_slice(&self) -> &[Self::Item];
/// Gives a unique slice over the whole thing.
///
/// A correct implementation will return a slice with a length equal to the
/// `CAPACITY` value.
fn as_slice_mut(&mut self) -> &mut [Self::Item];
/// Create a default-initialized instance of ourself, similar to the
/// [`Default`] trait, but implemented for the same range of sizes as
/// [`Array`].
fn default() -> Self;
}
#[cfg(all(feature = "generic-array", not(feature = "rustc_1_55")))]
core::compile_error!("generic-array requires `rustc_1_55` feature");
#[cfg(feature = "rustc_1_55")]
mod const_generic_impl;
#[cfg(not(feature = "rustc_1_55"))]
mod generated_impl;
#[cfg(feature = "generic-array")]
mod generic_array_impl;
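The doc comment notes that manual implementations are rarely needed; still, a sketch of what one looks like for a hypothetical newtype (the type name and capacity here are made up) may clarify the contract:

```rust
use tinyvec::{Array, TinyVec};

#[derive(Default)]
struct Pair([u32; 2]);

impl Array for Pair {
  type Item = u32;
  const CAPACITY: usize = 2;

  // Both slices must have length equal to CAPACITY.
  fn as_slice(&self) -> &[u32] {
    &self.0
  }

  fn as_slice_mut(&mut self) -> &mut [u32] {
    &mut self.0
  }

  fn default() -> Self {
    // Disambiguate from `Array::default` to avoid infinite recursion.
    <Pair as Default>::default()
  }
}

fn main() {
  let mut v: TinyVec<Pair> = TinyVec::default();
  v.push(1);
  v.push(2);
  v.push(3); // the third element moves the storage to the heap
  assert_eq!(&v[..], &[1, 2, 3]);
}
```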


@@ -0,0 +1,23 @@
use super::Array;
impl<T: Default, const N: usize> Array for [T; N] {
type Item = T;
const CAPACITY: usize = N;
#[inline(always)]
#[must_use]
fn as_slice(&self) -> &[T] {
&*self
}
#[inline(always)]
#[must_use]
fn as_slice_mut(&mut self) -> &mut [T] {
&mut *self
}
#[inline(always)]
fn default() -> Self {
[(); N].map(|_| Default::default())
}
}
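This blanket impl is what the `rustc_1_55` feature buys: any `[T; N]` works as backing storage, not just the pre-generated lengths. A small illustration, assuming that feature is enabled:

```rust
use tinyvec::ArrayVec;

fn main() {
  // 33 is covered by the const-generic blanket impl, but not by the
  // generated impls, which stop at 0..=32 plus powers of two up to 4,096.
  let mut v: ArrayVec<[u8; 33]> = ArrayVec::new();
  v.extend(0..33);
  assert_eq!(v.len(), 33);
}
```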

File diff suppressed because it is too large.


@@ -0,0 +1,26 @@
use super::Array;
use generic_array::{ArrayLength, GenericArray};
impl<T: Default, N: ArrayLength> Array for GenericArray<T, N> {
type Item = T;
const CAPACITY: usize = N::USIZE;
#[inline(always)]
#[must_use]
fn as_slice(&self) -> &[T] {
&*self
}
#[inline(always)]
#[must_use]
fn as_slice_mut(&mut self) -> &mut [T] {
&mut *self
}
#[inline(always)]
fn default() -> Self {
<Self as Default>::default()
}
}
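Assuming the `generic-array` feature is enabled, this impl lets a `GenericArray` serve as the inline backing store. A hedged sketch of typical use (the type-level length `U8` comes from `generic_array::typenum`):

```rust
use generic_array::{typenum::U8, GenericArray};
use tinyvec::TinyVec;

fn main() {
  let mut v: TinyVec<GenericArray<u8, U8>> = TinyVec::default();
  v.extend(0u8..8); // fills the inline capacity of 8
  v.push(8);        // the ninth element spills to the heap
  assert_eq!(v.len(), 9);
}
```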

Some files were not shown because too many files have changed in this diff.