Backed out changeset 706e9934d689 (bug 1888472) for causing build bustages @ Cargo.lock CLOSED TREE
@@ -75,9 +75,9 @@ git = "https://github.com/mozilla-spidermonkey/jsparagus"
 rev = "61f399c53a641ebd3077c1f39f054f6d396a633c"
 replace-with = "vendored-sources"
 
-[source."git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460"]
+[source."git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f"]
 git = "https://github.com/mozilla/application-services"
-rev = "03cf4a362408b9caffa6848aae2fcf472a789460"
+rev = "3303de12a04710164f7c80fb5e466fbf2560a20f"
 replace-with = "vendored-sources"
 
 [source."git+https://github.com/mozilla/audioipc?rev=e6f44a2bd1e57d11dfc737632a9e849077632330"]
Cargo.lock (generated, 60 lines changed)
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 4
+version = 3
 
 [[package]]
 name = "aa-stroke"
@@ -1786,7 +1786,7 @@ dependencies = [
 [[package]]
 name = "error-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "error-support-macros",
  "lazy_static",

@@ -1798,7 +1798,7 @@ dependencies = [
 [[package]]
 name = "error-support-macros"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "proc-macro2",
  "quote",

@@ -1909,7 +1909,7 @@ dependencies = [
 [[package]]
 name = "firefox-versioning"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "serde_json",
  "thiserror",
@@ -2483,7 +2483,7 @@ dependencies = [
  "unicode-bidi-ffi",
  "url",
  "viaduct",
- "webext-storage",
+ "webext_storage_bridge",
  "webrender_bindings",
  "wgpu_bindings",
  "wpf-gpu-raster",

@@ -2498,7 +2498,6 @@ dependencies = [
  "suggest",
  "tabs",
  "uniffi",
- "webext-storage",
 ]
 
 [[package]]
@@ -3187,7 +3186,7 @@ dependencies = [
 [[package]]
 name = "interrupt-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "lazy_static",
  "parking_lot",

@@ -4695,7 +4694,7 @@ dependencies = [
 [[package]]
 name = "nss_build_common"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 
 [[package]]
 name = "nsstring"

@@ -4908,7 +4907,7 @@ checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
 [[package]]
 name = "payload-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "serde",
  "serde_derive",

@@ -5404,7 +5403,7 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
 [[package]]
 name = "relevancy"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "anyhow",
  "base64 0.21.3",

@@ -5429,7 +5428,7 @@ dependencies = [
 [[package]]
 name = "remote_settings"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "anyhow",
  "camino",
@@ -5441,7 +5440,6 @@ dependencies = [
  "rusqlite",
  "serde",
  "serde_json",
- "sha2",
  "thiserror",
  "uniffi",
  "url",
@@ -6001,7 +5999,7 @@ dependencies = [
 [[package]]
 name = "sql-support"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "interrupt-support",
  "lazy_static",

@@ -6180,7 +6178,7 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
 [[package]]
 name = "suggest"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "anyhow",
  "chrono",

@@ -6232,7 +6230,7 @@ dependencies = [
 [[package]]
 name = "sync-guid"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "base64 0.21.3",
  "rand",

@@ -6243,7 +6241,7 @@ dependencies = [
 [[package]]
 name = "sync15"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "anyhow",
  "error-support",

@@ -6283,7 +6281,7 @@ dependencies = [
 [[package]]
 name = "tabs"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "anyhow",
  "error-support",

@@ -6607,7 +6605,7 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
 [[package]]
 name = "types"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "rusqlite",
  "serde",

@@ -6992,7 +6990,7 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 [[package]]
 name = "viaduct"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "ffi-support",
  "log",

@@ -7151,7 +7149,7 @@ dependencies = [
 [[package]]
 name = "webext-storage"
 version = "0.1.0"
-source = "git+https://github.com/mozilla/application-services?rev=03cf4a362408b9caffa6848aae2fcf472a789460#03cf4a362408b9caffa6848aae2fcf472a789460"
+source = "git+https://github.com/mozilla/application-services?rev=3303de12a04710164f7c80fb5e466fbf2560a20f#3303de12a04710164f7c80fb5e466fbf2560a20f"
 dependencies = [
  "anyhow",
  "error-support",

@@ -7172,6 +7170,28 @@ dependencies = [
  "url",
 ]
 
+[[package]]
+name = "webext_storage_bridge"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "atomic_refcell",
+ "cstr",
+ "golden_gate",
+ "interrupt-support",
+ "moz_task",
+ "nserror",
+ "nsstring",
+ "once_cell",
+ "serde",
+ "serde_json",
+ "sql-support",
+ "storage_variant",
+ "thin-vec",
+ "webext-storage",
+ "xpcom",
+]
+
 [[package]]
 name = "webrender"
 version = "0.62.0"
Cargo.toml (16 lines changed)
@@ -218,13 +218,13 @@ midir = { git = "https://github.com/mozilla/midir.git", rev = "85156e360a37d8517
 malloc_size_of_derive = { path = "xpcom/rust/malloc_size_of_derive" }
 
 # application-services overrides to make updating them all simpler.
-interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-relevancy = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-sql-support = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-suggest = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-sync15 = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-tabs = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-viaduct = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
-webext-storage = { git = "https://github.com/mozilla/application-services", rev = "03cf4a362408b9caffa6848aae2fcf472a789460" }
+interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+relevancy = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+sql-support = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+suggest = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+sync15 = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+tabs = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+viaduct = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
+webext-storage = { git = "https://github.com/mozilla/application-services", rev = "3303de12a04710164f7c80fb5e466fbf2560a20f" }
 
 allocator-api2 = { path = "third_party/rust/allocator-api2" }
@@ -1,14 +0,0 @@
-# RustWebextstorage.sys.mjs
-
-```{js:autoclass} RustWebextstorage.sys.UnexpectedError
-:members:
-:exclude-members: UnexpectedError
-```
-
-```{js:autoclass} RustWebextstorage.sys.JsonError
-:members:
-:exclude-members: JsonError
-```
-
-```{js:autoclass} RustWebextstorage.sys.QuotaError
-:members:
-:exclude-members: QuotaError
-```
@@ -111,3 +111,10 @@ The following XPCOM components are written in Rust.
   which [merges](https://mozilla.github.io/dogear) bookmarks from Firefox Sync
   with bookmarks in the Places database.
   [There's also some docs on how Rust interacts with Sync](/services/sync/rust-engines.rst)
+- [webext_storage_bridge](https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge),
+  which powers the WebExtension storage.sync API. It's a self-contained example
+  that pulls in a crate from application-services for the heavy lifting, wraps
+  that up in a Rust XPCOM component, and then wraps the component in a JS
+  interface. There's also some boilerplate there around adding a
+  `components.conf` file, and a dummy C++ header that declares the component
+  constructor. [It has some in-depth documentation on how it hangs together](../toolkit/components/extensions/webextensions/webext-storage.rst).
@@ -3626,6 +3626,7 @@ pref("webextensions.webRequest.requestBodyMaxRawBytes", 16777216);
 pref("webextensions.storage.session.enforceQuota", false);
 #endif
 
+pref("webextensions.storage.sync.enabled", true);
 // Should we use the old kinto-based implementation of storage.sync? To be removed in bug 1637465.
 pref("webextensions.storage.sync.kinto", false);
 // Server used by the old kinto-based implementation of storage.sync.
@@ -125,8 +125,9 @@ WebExt-Storage
 
 webext-storage is implemented in Rust and lives in
 `application services <https://github.com/mozilla/application-services/tree/main/components/webext-storage>`_
-and is vendored into the addons code - note that this includes the storage
-*and* Sync code. The Sync engine itself is a shim in the sync directory.
+and is vendored into the `addons code <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge>`_ -
+note that this includes the storage *and* Sync code. The Sync engine itself
+is a shim in the sync directory.
 
 See the :doc:`rust-engines` document for more about how rust engines are
 integrated.
@@ -17,10 +17,21 @@ The bridge
 ==========
 
 `"Golden Gate" <https://searchfox.org/mozilla-central/source/services/sync/golden_gate>`_
-was previously used to help bridge any Rust implemented Sync engines with desktop,
-but most of that logic has been removed. The integration of `UniFFI <https://github.com/mozilla/uniffi-rs>`_-ed components
-made the Golden Gate bridge code obsolete. Currently Golden Gate contains the
-logging logic for the components and the bridged engines exist in application
-services within the respective sync components. For instance, these are bridged
-engines for `tabs <https://github.com/mozilla/application-services/blob/main/components/tabs/src/sync/bridge.rs>`_ and
-`webext-storage <https://github.com/mozilla/application-services/blob/main/components/webext-storage/src/sync/bridge.rs>`_.
+is a utility to help bridge any Rust implemented Sync engines with desktop. In
+other words, it's a "rusty bridge" - get it? Get it? Yet another of Lina's puns
+that live on!
+
+One of the key challenges with integrating a Rust Sync component with desktop
+is the different threading models. The Rust code tends to be synchronous -
+most functions block the calling thread to do the disk or network IO necessary
+to work - it assumes that the consumer will delegate this to some other thread.
+
+So golden_gate is this background thread delegation for a Rust Sync engine -
+gecko calls golden-gate on the main thread, it marshals the call to a worker
+thread, and the result is marshalled back to the main thread.
+
+It's worth noting that golden_gate is just for the Sync engine part - other
+parts of the component (ie, the part that provides the functionality that's not
+sync related) will have its own mechanism for this. For example, the
+`webext-storage bridge <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src>`_
+uses a similar technique `which has some in-depth documentation <../../toolkit/components/extensions/webextensions/webext-storage.html>`_.
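To make the marshalling concrete, here is a minimal, hypothetical sketch of the pattern golden_gate automates, written against the `moz_task` APIs this patch itself uses (`Task`, `TaskRunnable`, `DispatchOptions`). The `ExampleTask` type and its field are invented for illustration; only the dispatch calls mirror the patch below.

```rust
use atomic_refcell::AtomicRefCell;
use moz_task::{DispatchOptions, Task, TaskRunnable};
use nserror::nsresult;
use xpcom::interfaces::nsIEventTarget;

/// Invented example: blocking work that must stay off the main thread.
struct ExampleTask {
    // Written on the worker thread, read back on the main thread.
    result: AtomicRefCell<Option<i64>>,
}

impl Task for ExampleTask {
    fn run(&self) {
        // Worker thread: safe to block on disk or network IO here.
        *self.result.borrow_mut() = Some(42);
    }

    fn done(&self) -> Result<(), nsresult> {
        // Back on the main thread: hand the result to the caller.
        Ok(())
    }
}

/// Gecko calls this on the main thread; the task's `run` happens on `target`,
/// and `done` is marshalled back to the main thread afterwards.
fn dispatch_example(task: ExampleTask, target: &nsIEventTarget) -> Result<(), nsresult> {
    let runnable = TaskRunnable::new("example", Box::new(task))?;
    TaskRunnable::dispatch_with_options(
        runnable,
        target,
        DispatchOptions::default().may_block(true),
    )?;
    Ok(())
}
```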
services/sync/golden_gate/src/ferry.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use nsstring::nsCString;
use storage_variant::VariantType;
use sync15::Guid;
use xpcom::{interfaces::nsIVariant, RefPtr};

/// An operation that runs on the background thread, and optionally passes a
/// result to its callback.
pub enum Ferry {
    LastSync,
    SetLastSync(i64),
    SyncId,
    ResetSyncId,
    EnsureCurrentSyncId(String),
    SyncStarted,
    StoreIncoming(Vec<nsCString>),
    SetUploaded(i64, Vec<Guid>),
    SyncFinished,
    Reset,
    Wipe,
}

impl Ferry {
    /// Returns the operation name for debugging and labeling the task
    /// runnable.
    pub fn name(&self) -> &'static str {
        match self {
            Ferry::LastSync => concat!(module_path!(), "getLastSync"),
            Ferry::SetLastSync(_) => concat!(module_path!(), "setLastSync"),
            Ferry::SyncId => concat!(module_path!(), "getSyncId"),
            Ferry::ResetSyncId => concat!(module_path!(), "resetSyncId"),
            Ferry::EnsureCurrentSyncId(_) => concat!(module_path!(), "ensureCurrentSyncId"),
            Ferry::SyncStarted => concat!(module_path!(), "syncStarted"),
            Ferry::StoreIncoming { .. } => concat!(module_path!(), "storeIncoming"),
            Ferry::SetUploaded { .. } => concat!(module_path!(), "setUploaded"),
            Ferry::SyncFinished => concat!(module_path!(), "syncFinished"),
            Ferry::Reset => concat!(module_path!(), "reset"),
            Ferry::Wipe => concat!(module_path!(), "wipe"),
        }
    }
}

/// The result of a ferry task, sent from the background thread back to the
/// main thread. Results are converted to variants, and passed as arguments to
/// `mozIBridgedSyncEngineCallback`s.
pub enum FerryResult {
    LastSync(i64),
    SyncId(Option<String>),
    AssignedSyncId(String),
    Null,
}

impl Default for FerryResult {
    fn default() -> Self {
        FerryResult::Null
    }
}

impl FerryResult {
    /// Converts the result to an `nsIVariant` that can be passed as an
    /// argument to `callback.handleSuccess()`.
    pub fn into_variant(self) -> RefPtr<nsIVariant> {
        match self {
            FerryResult::LastSync(v) => v.into_variant(),
            FerryResult::SyncId(Some(v)) => nsCString::from(v).into_variant(),
            FerryResult::SyncId(None) => ().into_variant(),
            FerryResult::AssignedSyncId(v) => nsCString::from(v).into_variant(),
            FerryResult::Null => ().into_variant(),
        }
    }
}
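As a small usage sketch (not part of the patch): a background operation's outcome becomes a `FerryResult`, and `into_variant` turns it into the `nsIVariant` handed to `mozIBridgedSyncEngineCallback`. The millisecond value and function name here are stand-ins.

```rust
use xpcom::{interfaces::nsIVariant, RefPtr};

// Sketch only: wrap an engine's lastSync answer for the XPCOM callback,
// using the `FerryResult` defined in ferry.rs above.
fn last_sync_variant(last_sync_millis: i64) -> RefPtr<nsIVariant> {
    FerryResult::LastSync(last_sync_millis).into_variant()
}
```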
@@ -2,16 +2,118 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-//! **Golden Gate** 🌉 was created to bridge Desktop Sync to our suite of
-//! Rust sync and storage components. But the UniFFI-cation of our
-//! components made much of Golden Gate's logic obsolete. It is now mainly
-//! a means to access LogSink, the logger for our components.
+//! **Golden Gate** 🌉 is a crate for bridging Desktop Sync to our suite of
+//! Rust sync and storage components. It connects Sync's `BridgedEngine` class
+//! to the Rust `BridgedEngine` trait via the `mozIBridgedSyncEngine` XPCOM
+//! interface.
+//!
+//! Due to limitations in implementing XPCOM interfaces for generic types,
+//! Golden Gate doesn't implement `mozIBridgedSyncEngine` directly. Instead,
+//! it provides helpers, called "ferries", for passing Sync records between
+//! JavaScript and Rust. The ferries also handle threading and type
+//! conversions.
+//!
+//! Here's a step-by-step guide for adding a new Rust Sync engine to Firefox.
+//!
+//! ## Step 1: Create your (XPCOM) bridge
+//!
+//! In your consuming crate, define a type for your `mozIBridgedSyncEngine`
+//! implementation. We'll call this type the **bridge**. The bridge is
+//! responsible for exposing your Sync engine to XPIDL [^1], in a way that lets
+//! JavaScript call it.
+//!
+//! For your bridge type, you'll need to implement an xpcom interface with the
+//! `#[xpcom(implement(mozIBridgedSyncEngine), nonatomic)]` attribute then
+//! define `xpcom_method!()` stubs for the `mozIBridgedSyncEngine` methods. For
+//! more details about implementing XPCOM methods in Rust, check out the docs in
+//! `xpcom/rust/xpcom/src/method.rs`.
+//!
+//! You'll also need to add an entry for your bridge type to `components.conf`,
+//! and define C++ and Rust constructors for it, so that JavaScript code can
+//! create instances of it. Check out `NS_NewWebExtStorage` (and, in C++,
+//! `mozilla::extensions::storageapi::NewWebExtStorage`) and
+//! `NS_NewSyncedBookmarksMerger` (`mozilla::places::NewSyncedBookmarksMerger`
+//! in C++) for how to do this.
+//!
+//! [^1]: You can think of XPIDL as a souped-up C FFI, with richer types and a
+//! degree of type safety.
+//!
+//! ## Step 2: Add a background task queue to your bridge
+//!
+//! A task queue lets your engine do I/O, merging, and other syncing tasks on a
+//! background thread pool. This is important because database reads and writes
+//! can take an unpredictable amount of time. Doing these on the main thread can
+//! cause jank, and, in the worst case, lock up the browser UI for seconds at a
+//! time.
+//!
+//! The `moz_task` crate provides a `create_background_task_queue` function to
+//! do this. Once you have a queue, you can use it to call into your Rust
+//! engine. Golden Gate takes care of ferrying arguments back and forth across
+//! the thread boundary.
+//!
+//! Since it's a queue, ferries arrive in the order they're scheduled, so
+//! your engine's `store_incoming` method will always be called before `apply`,
+//! which is likewise called before `set_uploaded`. The thread manager scales
+//! the pool for you; you don't need to create or manage your own threads.
+//!
+//! ## Step 3: Create your Rust engine
+//!
+//! Next, you'll need to implement the Rust side of the bridge. This is a type
+//! that implements the `BridgedEngine` trait.
+//!
+//! Bridged engines handle storing incoming Sync records, merging changes,
+//! resolving conflicts, and fetching outgoing records for upload. Under the
+//! hood, your engine will hold either a database connection directly, or
+//! another object that does.
+//!
+//! Although outside the scope of Golden Gate, your engine will also likely
+//! expose a data storage API, for fetching, updating, and deleting items
+//! locally. Golden Gate provides the syncing layer on top of this local store.
+//!
+//! A `BridgedEngine` itself doesn't need to be `Send` or `Sync`, but the
+//! ferries require both, since they're calling into your bridge on the
+//! background task queue.
+//!
+//! In practice, this means your bridge will need to hold a thread-safe owned
+//! reference to the engine, via `Arc<Mutex<BridgedEngine>>`. In fact, this
+//! pattern is so common that Golden Gate implements `BridgedEngine` for any
+//! `Mutex<BridgedEngine>`, which automatically locks the mutex before calling
+//! into the engine.
+//!
+//! ## Step 4: Connect the bridge to the JavaScript and Rust sides
+//!
+//! On the JavaScript side, you'll need to subclass Sync's `BridgedEngine`
+//! class, and give it a handle to your XPCOM bridge. The base class has all the
+//! machinery for hooking up any `mozIBridgedSyncEngine` implementation so that
+//! Sync can drive it.
+//!
+//! On the Rust side, each `mozIBridgedSyncEngine` method should create a
+//! Golden Gate ferry, and dispatch it to the background task queue. The
+//! ferries correspond to the method names. For example, `ensureCurrentSyncId`
+//! should create a `Ferry::EnsureCurrentSyncId(...)`; `storeIncoming`, a
+//! `Ferry::StoreIncoming(...)`; and so on. This is mostly boilerplate.
+//!
+//! And that's it! Each ferry will, in turn, call into your Rust
+//! `BridgedEngine`, and send the results back to JavaScript.
+//!
+//! For an example of how all this works, including exposing a storage (not
+//! just syncing!) API to JS via XPIDL, check out `webext_storage::Bridge` for
+//! the `storage.sync` API!
 
 #[macro_use]
 extern crate cstr;
 
 pub mod error;
 mod ferry;
 pub mod log;
 pub mod task;
 
 pub use crate::log::LogSink;
 pub use error::{Error, Result};
 // Re-export items from `interrupt-support` and `sync15`, so that
 // consumers of `golden_gate` don't have to depend on them.
 pub use interrupt_support::{Interrupted, Interruptee};
 pub use sync15::bso::{IncomingBso, OutgoingBso};
 pub use sync15::engine::{ApplyResults, BridgedEngine};
 pub use sync15::Guid;
 pub use task::{ApplyTask, FerryTask};
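Step 3's threading point is easy to miss, so here is a hedged sketch of the ownership shape it describes. `ExampleEngine` and `ExampleBridge` are invented names, and the `BridgedEngine` impl is elided; only the `Arc<Mutex<...>>` shape comes from the docs above.

```rust
use std::sync::{Arc, Mutex};

// Invented example engine; a real one holds (a weak handle to) its store and
// implements the `sync15::engine::BridgedEngine` trait re-exported above.
struct ExampleEngine;

// The XPCOM bridge keeps a thread-safe, owned reference. Because golden_gate
// implements `BridgedEngine` for `Mutex<E>`, ferries running on the background
// task queue lock the mutex and call straight through to the engine.
struct ExampleBridge {
    engine: Arc<Mutex<ExampleEngine>>,
}

impl ExampleBridge {
    fn new() -> Self {
        Self {
            engine: Arc::new(Mutex::new(ExampleEngine)),
        }
    }
}
```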
services/sync/golden_gate/src/task.rs (new file, 355 lines)
@@ -0,0 +1,355 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use std::{fmt::Write, mem, result};

use atomic_refcell::AtomicRefCell;
use moz_task::{DispatchOptions, Task, TaskRunnable, ThreadPtrHandle, ThreadPtrHolder};
use nserror::{nsresult, NS_ERROR_FAILURE};
use nsstring::{nsACString, nsCString};
use sync15::engine::{ApplyResults, BridgedEngine};
use sync15::Guid;
use thin_vec::ThinVec;
use xpcom::{
    interfaces::{
        mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback, nsIEventTarget,
    },
    RefPtr,
};

use crate::error::{Error, Result};
use crate::ferry::{Ferry, FerryResult};

/// A ferry task sends (or ferries) an operation to a bridged engine on a
/// background thread or task queue, and ferries back an optional result to
/// a callback.
pub struct FerryTask {
    /// We want to ensure scheduled ferries can't block finalization of the underlying
    /// store - we want a degree of confidence that closing the database will happen when
    /// we want even if tasks are queued up to run on another thread.
    /// We rely on the semantics of our BridgedEngines to help here:
    /// * A bridged engine is expected to hold a weak reference to its store.
    /// * Our LazyStore is the only thing holding a reference to the "real" store.
    /// Thus, when our LazyStore asks our "real" store to close, we can be confident
    /// a close will happen (ie, we assume that the real store will be able to unwrap
    /// the underlying sqlite `Connection` (using `Arc::try_unwrap`) and close it).
    /// However, note that if an operation on the bridged engine is currently running,
    /// we will block waiting for that operation to complete, so while this isn't
    /// guaranteed to happen immediately, it should happen "soon enough".
    engine: Box<dyn BridgedEngine>,
    ferry: Ferry,
    callback: ThreadPtrHandle<mozIBridgedSyncEngineCallback>,
    result: AtomicRefCell<anyhow::Result<FerryResult>>,
}

impl FerryTask {
    /// Creates a task to fetch the engine's last sync time, in milliseconds.
    #[inline]
    pub fn for_last_sync(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::LastSync, callback)
    }

    /// Creates a task to set the engine's last sync time, in milliseconds.
    #[inline]
    pub fn for_set_last_sync(
        engine: Box<dyn BridgedEngine>,
        last_sync_millis: i64,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SetLastSync(last_sync_millis), callback)
    }

    /// Creates a task to fetch the engine's sync ID.
    #[inline]
    pub fn for_sync_id(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SyncId, callback)
    }

    /// Creates a task to reset the engine's sync ID and all its local Sync
    /// metadata.
    #[inline]
    pub fn for_reset_sync_id(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::ResetSyncId, callback)
    }

    /// Creates a task to compare the bridged engine's local sync ID with
    /// the `new_sync_id` from `meta/global`, and ferry back the final sync ID
    /// to use.
    #[inline]
    pub fn for_ensure_current_sync_id(
        engine: Box<dyn BridgedEngine>,
        new_sync_id: &nsACString,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(
            engine,
            Ferry::EnsureCurrentSyncId(std::str::from_utf8(new_sync_id)?.into()),
            callback,
        )
    }

    /// Creates a task to signal that the engine is about to sync.
    #[inline]
    pub fn for_sync_started(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SyncStarted, callback)
    }

    /// Creates a task to store incoming records.
    pub fn for_store_incoming(
        engine: Box<dyn BridgedEngine>,
        incoming_envelopes_json: &[nsCString],
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(
            engine,
            Ferry::StoreIncoming(incoming_envelopes_json.to_vec()),
            callback,
        )
    }

    /// Creates a task to mark a subset of outgoing records as uploaded. This
    /// may be called multiple times per sync, or not at all if there are no
    /// records to upload.
    pub fn for_set_uploaded(
        engine: Box<dyn BridgedEngine>,
        server_modified_millis: i64,
        uploaded_ids: &[nsCString],
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        let uploaded_ids = uploaded_ids.iter().map(|id| Guid::from_slice(id)).collect();
        Self::with_ferry(
            engine,
            Ferry::SetUploaded(server_modified_millis, uploaded_ids),
            callback,
        )
    }

    /// Creates a task to signal that all records have been uploaded, and
    /// the engine has been synced. This is called even if there were no
    /// records uploaded.
    #[inline]
    pub fn for_sync_finished(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::SyncFinished, callback)
    }

    /// Creates a task to reset all local Sync state for the engine, without
    /// erasing user data.
    #[inline]
    pub fn for_reset(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::Reset, callback)
    }

    /// Creates a task to erase all local user data for the engine.
    #[inline]
    pub fn for_wipe(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        Self::with_ferry(engine, Ferry::Wipe, callback)
    }

    /// Creates a task for a ferry. The `callback` is bound to the current
    /// thread, and will be called once, after the ferry returns from the
    /// background thread.
    fn with_ferry(
        engine: Box<dyn BridgedEngine>,
        ferry: Ferry,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<FerryTask> {
        let name = ferry.name();
        Ok(FerryTask {
            engine,
            ferry,
            callback: ThreadPtrHolder::new(
                cstr!("mozIBridgedSyncEngineCallback"),
                RefPtr::new(callback),
            )?,
            result: AtomicRefCell::new(Err(Error::DidNotRun(name).into())),
        })
    }

    /// Dispatches the task to the given thread `target`.
    pub fn dispatch(self, target: &nsIEventTarget) -> Result<()> {
        let runnable = TaskRunnable::new(self.ferry.name(), Box::new(self))?;
        // `may_block` schedules the task on the I/O thread pool, since we
        // expect most operations to wait on I/O.
        TaskRunnable::dispatch_with_options(
            runnable,
            target,
            DispatchOptions::default().may_block(true),
        )?;
        Ok(())
    }

    /// Runs the task on the background thread. This is split out into its own
    /// method to make error handling easier.
    fn inner_run(&self) -> anyhow::Result<FerryResult> {
        let engine = &self.engine;
        Ok(match &self.ferry {
            Ferry::LastSync => FerryResult::LastSync(engine.last_sync()?),
            Ferry::SetLastSync(last_sync_millis) => {
                engine.set_last_sync(*last_sync_millis)?;
                FerryResult::default()
            }
            Ferry::SyncId => FerryResult::SyncId(engine.sync_id()?),
            Ferry::ResetSyncId => FerryResult::AssignedSyncId(engine.reset_sync_id()?),
            Ferry::EnsureCurrentSyncId(new_sync_id) => {
                FerryResult::AssignedSyncId(engine.ensure_current_sync_id(new_sync_id)?)
            }
            Ferry::SyncStarted => {
                engine.sync_started()?;
                FerryResult::default()
            }
            Ferry::StoreIncoming(incoming_envelopes_json) => {
                let incoming_envelopes = incoming_envelopes_json
                    .iter()
                    .map(|envelope| Ok(serde_json::from_slice(envelope)?))
                    .collect::<Result<_>>()?;

                engine.store_incoming(incoming_envelopes)?;
                FerryResult::default()
            }
            Ferry::SetUploaded(server_modified_millis, uploaded_ids) => {
                engine.set_uploaded(*server_modified_millis, uploaded_ids.as_slice())?;
                FerryResult::default()
            }
            Ferry::SyncFinished => {
                engine.sync_finished()?;
                FerryResult::default()
            }
            Ferry::Reset => {
                engine.reset()?;
                FerryResult::default()
            }
            Ferry::Wipe => {
                engine.wipe()?;
                FerryResult::default()
            }
        })
    }
}

impl Task for FerryTask {
    fn run(&self) {
        *self.result.borrow_mut() = self.inner_run();
    }

    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::DidNotRun(self.ferry.name()).into()),
        ) {
            Ok(result) => unsafe { callback.HandleSuccess(result.into_variant().coerce()) },
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(NS_ERROR_FAILURE, &*message) }
            }
        }
        .to_result()
    }
}

/// An apply task ferries incoming records to an engine on a background
/// thread, and ferries back records to upload. It's separate from
/// `FerryTask` because its callback type is different.
pub struct ApplyTask {
    engine: Box<dyn BridgedEngine>,
    callback: ThreadPtrHandle<mozIBridgedSyncEngineApplyCallback>,
    result: AtomicRefCell<anyhow::Result<Vec<String>>>,
}

impl ApplyTask {
    /// Returns the task name for debugging.
    pub fn name() -> &'static str {
        concat!(module_path!(), "apply")
    }

    /// Runs the task on the background thread.
    fn inner_run(&self) -> anyhow::Result<Vec<String>> {
        let ApplyResults {
            records: outgoing_records,
            ..
        } = self.engine.apply()?;
        let outgoing_records_json = outgoing_records
            .iter()
            .map(|record| Ok(serde_json::to_string(record)?))
            .collect::<Result<_>>()?;
        Ok(outgoing_records_json)
    }

    /// Creates a task. The `callback` is bound to the current thread, and will
    /// be called once, after the records are applied on the background thread.
    pub fn new(
        engine: Box<dyn BridgedEngine>,
        callback: &mozIBridgedSyncEngineApplyCallback,
    ) -> Result<ApplyTask> {
        Ok(ApplyTask {
            engine,
            callback: ThreadPtrHolder::new(
                cstr!("mozIBridgedSyncEngineApplyCallback"),
                RefPtr::new(callback),
            )?,
            result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()).into())),
        })
    }

    /// Dispatches the task to the given thread `target`.
    pub fn dispatch(self, target: &nsIEventTarget) -> Result<()> {
        let runnable = TaskRunnable::new(Self::name(), Box::new(self))?;
        TaskRunnable::dispatch_with_options(
            runnable,
            target,
            DispatchOptions::default().may_block(true),
        )?;
        Ok(())
    }
}

impl Task for ApplyTask {
    fn run(&self) {
        *self.result.borrow_mut() = self.inner_run();
    }

    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::DidNotRun(Self::name()).into()),
        ) {
            Ok(envelopes) => {
                let result = envelopes
                    .into_iter()
                    .map(nsCString::from)
                    .collect::<ThinVec<_>>();
                unsafe { callback.HandleSuccess(&result) }
            }
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(NS_ERROR_FAILURE, &*message) }
            }
        }
        .to_result()
    }
}
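Tying the pieces together, a hedged sketch of how a consuming bridge might use `FerryTask`. The queue-creation call matches how `moz_task::create_background_task_queue` is described in the lib.rs docs above, but treat its exact signature as an assumption; `ExampleBridge` and the function names are invented.

```rust
use cstr::cstr;
use golden_gate::{BridgedEngine, FerryTask};
use moz_task::create_background_task_queue;
use nserror::nsresult;
use xpcom::interfaces::{mozIBridgedSyncEngineCallback, nsISerialEventTarget};
use xpcom::RefPtr;

// Sketch: a bridge creates one serial queue at construction time (assumed
// signature: name in, serial event target out).
fn example_queue() -> Result<RefPtr<nsISerialEventTarget>, nsresult> {
    create_background_task_queue(cstr!("ExampleBridge"))
}

// Sketch: a getLastSync-style XPCOM method builds a ferry and dispatches it;
// `&nsISerialEventTarget` dereferences to the `&nsIEventTarget` that
// `FerryTask::dispatch` expects. `engine` and `callback` come from the
// surrounding XPCOM glue.
fn example_get_last_sync(
    engine: Box<dyn BridgedEngine>,
    callback: &mozIBridgedSyncEngineCallback,
    queue: &nsISerialEventTarget,
) -> golden_gate::Result<()> {
    let task = FerryTask::for_last_sync(engine, callback)?;
    task.dispatch(queue)
}
```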
@@ -124,6 +124,25 @@ class BridgedRecord extends RawCryptoWrapper {
   }
 }
 
+class BridgeError extends Error {
+  constructor(code, message) {
+    super(message);
+    this.name = "BridgeError";
+    // TODO: We may want to use a different name for this, since errors with
+    // a `result` property are treated specially by telemetry, discarding the
+    // message...but, unlike other `nserror`s, the message is actually useful,
+    // and we still want to capture it.
+    this.result = code;
+  }
+}
+
+class InterruptedError extends Error {
+  constructor(message) {
+    super(message);
+    this.name = "InterruptedError";
+  }
+}
+
 /**
  * Adapts a `Log.sys.mjs` logger to a `mozIServicesLogSink`. This class is copied
  * from `SyncedBookmarksMirror.sys.mjs`.

@@ -167,11 +186,114 @@ export class LogAdapter {
   }
 }
 
+// This converts the XPCOM-defined, callback-based mozIBridgedSyncEngine to
+// a promise-based implementation.
+export class BridgeWrapperXPCOM {
+  constructor(component) {
+    this.comp = component;
+  }
+
+  // A few sync, non-callback based attributes.
+  get storageVersion() {
+    return this.comp.storageVersion;
+  }
+
+  get allowSkippedRecord() {
+    return this.comp.allowSkippedRecord;
+  }
+
+  get logger() {
+    return this.comp.logger;
+  }
+
+  // And the async functions we promisify.
+  // Note this is `lastSync` via uniffi but `getLastSync` via xpcom
+  lastSync() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.getLastSync);
+  }
+
+  setLastSync(lastSyncMillis) {
+    return BridgeWrapperXPCOM.#promisify(this.comp.setLastSync, lastSyncMillis);
+  }
+
+  getSyncId() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.getSyncId);
+  }
+
+  resetSyncId() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.resetSyncId);
+  }
+
+  ensureCurrentSyncId(newSyncId) {
+    return BridgeWrapperXPCOM.#promisify(
+      this.comp.ensureCurrentSyncId,
+      newSyncId
+    );
+  }
+
+  syncStarted() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.syncStarted);
+  }
+
+  storeIncoming(incomingEnvelopesAsJSON) {
+    return BridgeWrapperXPCOM.#promisify(
+      this.comp.storeIncoming,
+      incomingEnvelopesAsJSON
+    );
+  }
+
+  apply() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.apply);
+  }
+
+  setUploaded(newTimestampMillis, uploadedIds) {
+    return BridgeWrapperXPCOM.#promisify(
+      this.comp.setUploaded,
+      newTimestampMillis,
+      uploadedIds
+    );
+  }
+
+  syncFinished() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.syncFinished);
+  }
+
+  reset() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.reset);
+  }
+
+  wipe() {
+    return BridgeWrapperXPCOM.#promisify(this.comp.wipe);
+  }
+
+  // Converts a XPCOM bridged function that takes a callback into one that returns a
+  // promise.
+  static #promisify(func, ...params) {
+    return new Promise((resolve, reject) => {
+      func(...params, {
+        // This object implicitly implements all three callback interfaces
+        // (`mozIBridgedSyncEngine{Apply, Result}Callback`), because they have
+        // the same methods. The only difference is the type of the argument
+        // passed to `handleSuccess`, which doesn't matter in JS.
+        handleSuccess: resolve,
+        handleError(code, message) {
+          reject(transformError(code, message));
+        },
+      });
+    });
+  }
+}
+
 /**
  * A base class used to plug a Rust engine into Sync, and have it work like any
  * other engine. The constructor takes a bridge as its first argument, which is
  * a "bridged sync engine", as defined by UniFFI in the application-services
  * crate.
+ * For backwards compatibility, this can also be an instance of an XPCOM
+ * component class that implements `mozIBridgedSyncEngine`, wrapped in
+ * a `BridgeWrapperXPCOM` wrapper.
+ * (Note that at time of writing, the above is slightly aspirational; the
+ * actual definition of the UniFFI shared bridged engine is still in flux.)
  *
  * This class inherits from `SyncEngine`, which has a lot of machinery that we
  * don't need, but that's fairly easy to override. It would be harder to

@@ -365,3 +487,13 @@ BridgedEngine.prototype = {
   },
 };
 Object.setPrototypeOf(BridgedEngine.prototype, SyncEngine.prototype);
 
+function transformError(code, message) {
+  switch (code) {
+    case Cr.NS_ERROR_ABORT:
+      return new InterruptedError(message);
+
+    default:
+      return new BridgeError(code, message);
+  }
+}
@@ -2,11 +2,11 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-const STORAGE_VERSION = 1; // This needs to be kept in-sync with the rust storage version
-
 import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
 
 import {
   BridgedEngine,
+  BridgeWrapperXPCOM,
   LogAdapter,
 } from "resource://services-sync/bridged_engine.sys.mjs";
 import { SyncEngine, Tracker } from "resource://services-sync/engines.sys.mjs";
@@ -15,16 +15,22 @@ const lazy = {};
 
 ChromeUtils.defineESModuleGetters(lazy, {
   MULTI_DEVICE_THRESHOLD: "resource://services-sync/constants.sys.mjs",
+  Observers: "resource://services-common/observers.sys.mjs",
   SCORE_INCREMENT_MEDIUM: "resource://services-sync/constants.sys.mjs",
   Svc: "resource://services-sync/util.sys.mjs",
   extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
-  storageSyncService:
-    "resource://gre/modules/ExtensionStorageComponents.sys.mjs",
-
   extensionStorageSyncKinto:
     "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs",
 });
 
+XPCOMUtils.defineLazyServiceGetter(
+  lazy,
+  "StorageSyncService",
+  "@mozilla.org/extensions/storage/sync;1",
+  "nsIInterfaceRequestor"
+);
+
 const PREF_FORCE_ENABLE = "engine.extension-storage.force";
 
 // A helper to indicate whether extension-storage is enabled - it's based on
@@ -63,7 +69,11 @@ function setEngineEnabled(enabled) {
 
 // A "bridged engine" to our webext-storage component.
 export function ExtensionStorageEngineBridge(service) {
+  this.component = lazy.StorageSyncService.getInterface(
+    Ci.mozIBridgedSyncEngine
+  );
   BridgedEngine.call(this, "Extension-Storage", service);
+  this._bridge = new BridgeWrapperXPCOM(this.component);
 
   let app_services_logger = Cc["@mozilla.org/appservices/logger;1"].getService(
     Ci.mozIAppServicesLogger
@@ -78,44 +88,78 @@ ExtensionStorageEngineBridge.prototype = {
   // Used to override the engine name in telemetry, so that we can distinguish .
   overrideTelemetryName: "rust-webext-storage",
 
-  async initialize() {
-    await SyncEngine.prototype.initialize.call(this);
-    this._rustStore = await lazy.storageSyncService.getStorageAreaInstance();
-    this._bridge = await this._rustStore.bridgedEngine();
-
-    // Uniffi currently only supports async methods, so we'll need to hardcode
-    // these values for now (which is fine for now as these hardly ever change)
-    this._bridge.storageVersion = STORAGE_VERSION;
-    this._bridge.allowSkippedRecord = true;
-    this._bridge.getSyncId = async () => {
-      let syncID = await this._bridge.syncId();
-      return syncID;
-    };
-
-    this._log.info("Got a bridged engine!");
-    this._tracker.modified = true;
-  },
-
-  async _notifyPendingChanges() {
-    try {
-      let changeSets = await this._rustStore.getSyncedChanges();
-      changeSets.forEach(changeSet => {
-        try {
-          lazy.extensionStorageSync.notifyListeners(
-            changeSet.extId,
-            JSON.parse(changeSet.changes)
-          );
-        } catch (ex) {
-          this._log.warn(
-            `Error notifying change listeners for ${changeSet.extId}`,
-            ex
-          );
-        }
-      });
-    } catch (ex) {
-      this._log.warn("Error fetching pending synced changes", ex);
-    }
-  },
+  _notifyPendingChanges() {
+    return new Promise(resolve => {
+      this.component
+        .QueryInterface(Ci.mozISyncedExtensionStorageArea)
+        .fetchPendingSyncChanges({
+          QueryInterface: ChromeUtils.generateQI([
+            "mozIExtensionStorageListener",
+            "mozIExtensionStorageCallback",
+          ]),
+          onChanged: (extId, json) => {
+            try {
+              lazy.extensionStorageSync.notifyListeners(
+                extId,
+                JSON.parse(json)
+              );
+            } catch (ex) {
+              this._log.warn(
+                `Error notifying change listeners for ${extId}`,
+                ex
+              );
+            }
+          },
+          handleSuccess: resolve,
+          handleError: (code, message) => {
+            this._log.warn(
+              "Error fetching pending synced changes",
+              message,
+              code
+            );
+            resolve();
+          },
+        });
+    });
+  },
+
+  _takeMigrationInfo() {
+    return new Promise(resolve => {
+      this.component
+        .QueryInterface(Ci.mozIExtensionStorageArea)
+        .takeMigrationInfo({
+          QueryInterface: ChromeUtils.generateQI([
+            "mozIExtensionStorageCallback",
+          ]),
+          handleSuccess: result => {
+            resolve(result ? JSON.parse(result) : null);
+          },
+          handleError: (code, message) => {
+            this._log.warn("Error fetching migration info", message, code);
+            // `takeMigrationInfo` doesn't actually perform the migration,
+            // just reads (and clears) any data stored in the DB from the
+            // previous migration.
+            //
+            // Any errors here are very likely occurring a good while
+            // after the migration ran, so we just warn and pretend
+            // nothing was there.
+            resolve(null);
+          },
+        });
+    });
+  },
+
+  async _syncStartup() {
+    let result = await super._syncStartup();
+    let info = await this._takeMigrationInfo();
+    if (info) {
+      lazy.Observers.notify(
+        "weave:telemetry:migration",
+        info,
+        "webext-storage"
+      );
+    }
+    return result;
+  },
 
   async _processIncoming() {
@@ -1,13 +1,24 @@
 /* Any copyright is dedicated to the Public Domain.
  * http://creativecommons.org/publicdomain/zero/1.0/ */
 
-const { BridgedEngine } = ChromeUtils.importESModule(
+const { BridgedEngine, BridgeWrapperXPCOM } = ChromeUtils.importESModule(
   "resource://services-sync/bridged_engine.sys.mjs"
 );
 const { Service } = ChromeUtils.importESModule(
   "resource://services-sync/service.sys.mjs"
 );
 
+// Wraps an `object` in a proxy so that its methods are bound to it. This
+// simulates how XPCOM class instances have all their methods bound.
+function withBoundMethods(object) {
+  return new Proxy(object, {
+    get(target, key) {
+      let value = target[key];
+      return typeof value == "function" ? value.bind(target) : value;
+    },
+  });
+}
+
 add_task(async function test_interface() {
   class TestBridge {
     constructor() {
@@ -28,32 +39,35 @@ add_task(async function test_interface() {
 
     // `mozIBridgedSyncEngine` methods.
 
-    lastSync() {
-      return this.lastSyncMillis;
+    getLastSync(callback) {
+      CommonUtils.nextTick(() => callback.handleSuccess(this.lastSyncMillis));
     }
 
-    setLastSync(millis) {
+    setLastSync(millis, callback) {
       this.lastSyncMillis = millis;
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
 
-    resetSyncId() {
-      return this.syncID;
+    resetSyncId(callback) {
+      CommonUtils.nextTick(() => callback.handleSuccess(this.syncID));
     }
 
-    ensureCurrentSyncId(newSyncId) {
+    ensureCurrentSyncId(newSyncId, callback) {
       equal(newSyncId, this.syncID, "Local and new sync IDs should match");
-      return this.syncID;
+      CommonUtils.nextTick(() => callback.handleSuccess(this.syncID));
     }
 
-    syncStarted() {
+    syncStarted(callback) {
       this.wasSyncStarted = true;
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
 
-    storeIncoming(envelopes) {
+    storeIncoming(envelopes, callback) {
       this.incomingEnvelopes.push(...envelopes.map(r => JSON.parse(r)));
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
 
-    apply() {
+    apply(callback) {
       let outgoingEnvelopes = [
         {
           id: "hanson",
@@ -75,31 +89,35 @@
           payload: JSON.stringify(cleartext),
         })
       );
-      return outgoingEnvelopes;
+      CommonUtils.nextTick(() => callback.handleSuccess(outgoingEnvelopes));
     }
 
-    setUploaded(millis, ids) {
+    setUploaded(millis, ids, callback) {
       this.uploadedIDs.push(...ids);
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
 
-    syncFinished() {
+    syncFinished(callback) {
       this.wasSyncFinished = true;
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
 
-    reset() {
+    reset(callback) {
       this.clear();
       this.wasReset = true;
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
 
-    wipe() {
+    wipe(callback) {
       this.clear();
       this.wasWiped = true;
+      CommonUtils.nextTick(() => callback.handleSuccess());
     }
   }
 
   let bridge = new TestBridge();
   let engine = new BridgedEngine("Nineties", Service);
-  engine._bridge = bridge;
+  engine._bridge = new BridgeWrapperXPCOM(withBoundMethods(bridge));
   engine.enabled = true;
 
   let server = await serverForFoo(engine);
@@ -13,6 +13,10 @@ const { ExtensionStorageEngineBridge, ExtensionStorageEngineKinto } =
     "resource://services-sync/engines/extension-storage.sys.mjs"
   );
 
+const { BridgeWrapperXPCOM } = ChromeUtils.importESModule(
+  "resource://services-sync/bridged_engine.sys.mjs"
+);
+
 Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug");
 
 add_task(async function test_switching_between_kinto_and_bridged() {
@@ -103,7 +107,6 @@ add_task(async function test_enable() {
 
 add_task(async function test_notifyPendingChanges() {
   let engine = new ExtensionStorageEngineBridge(Service);
-  await engine.initialize();
 
   let extension = { id: "ext-1" };
   let expectedChange = {
@@ -114,43 +117,56 @@
   let lastSync = 0;
   let syncID = Utils.makeGUID();
   let error = null;
-  engine._rustStore = {
-    getSyncedChanges() {
-      if (error) {
-        throw new Error(error.message);
-      } else {
-        return [
-          { extId: extension.id, changes: JSON.stringify(expectedChange) },
-        ];
-      }
-    },
-  };
-
-  engine._bridge = {
-    ensureCurrentSyncId(id) {
+  engine.component = {
+    QueryInterface: ChromeUtils.generateQI([
+      "mozIBridgedSyncEngine",
+      "mozIExtensionStorageArea",
+      "mozISyncedExtensionStorageArea",
+    ]),
+    ensureCurrentSyncId(id, callback) {
       if (syncID != id) {
         syncID = id;
         lastSync = 0;
       }
-      return id;
+      callback.handleSuccess(id);
     },
-    resetSyncId() {
-      return syncID;
+    resetSyncId(callback) {
+      callback.handleSuccess(syncID);
     },
-    syncStarted() {},
-    lastSync() {
-      return lastSync;
+    syncStarted(callback) {
+      callback.handleSuccess();
     },
-    setLastSync(lastSyncMillis) {
+    getLastSync(callback) {
+      callback.handleSuccess(lastSync);
+    },
+    setLastSync(lastSyncMillis, callback) {
       lastSync = lastSyncMillis;
+      callback.handleSuccess();
     },
-    apply() {
-      return [];
+    apply(callback) {
+      callback.handleSuccess([]);
     },
-    setUploaded(_modified, _ids) {},
-    syncFinished() {},
+    fetchPendingSyncChanges(callback) {
+      if (error) {
+        callback.handleError(Cr.NS_ERROR_FAILURE, error.message);
+      } else {
+        callback.onChanged(extension.id, JSON.stringify(expectedChange));
+        callback.handleSuccess();
+      }
+    },
+    setUploaded(modified, ids, callback) {
+      callback.handleSuccess();
+    },
+    syncFinished(callback) {
+      callback.handleSuccess();
+    },
+    takeMigrationInfo(callback) {
+      callback.handleSuccess(null);
+    },
   };
 
+  engine._bridge = new BridgeWrapperXPCOM(engine.component);
+
   let server = await serverForFoo(engine);
 
   let actualChanges = [];
@@ -119,7 +119,7 @@ add_task(async function test_calling_sync_calls_ext_storage_sync() {
|
||||
returns: Promise.resolve(),
|
||||
}));
|
||||
try {
|
||||
await withContext(async function (context) {
|
||||
await withSyncContext(async function (context) {
|
||||
// Set something so that everyone knows that we're using storage.sync
|
||||
await extensionStorageSync.set(extension, { a: "b" }, context);
|
||||
let ping = await sync_engine_and_validate_telem(engine, false);
|
||||
|
||||
@@ -0,0 +1,81 @@
/* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/publicdomain/zero/1.0/ */

// Import the rust-based and kinto-based implementations. Not great to grab
// these as they're somewhat private, but we want to run the pings through our
// validation machinery which is here in the sync test code.
const { extensionStorageSync: rustImpl } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionStorageSync.sys.mjs"
);
const { extensionStorageSyncKinto: kintoImpl } = ChromeUtils.importESModule(
"resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
);

const { Service } = ChromeUtils.importESModule(
"resource://services-sync/service.sys.mjs"
);
const { ExtensionStorageEngineBridge } = ChromeUtils.importESModule(
"resource://services-sync/engines/extension-storage.sys.mjs"
);

Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false);
Services.prefs.setStringPref("webextensions.storage.sync.log.level", "debug");

// It's tricky to force error cases here (the databases are opened with
// exclusive locks) and that part of the code has coverage in the vendored
// application-services webext-storage crate. So this just tests that the
// migration data ends up in the ping, and exactly once.
add_task(async function test_sync_migration_telem() {
// Set some stuff using the kinto-based impl prior to fully setting up sync.
let e1 = { id: "test@mozilla.com" };
let c1 = { extension: e1, callOnClose() {} };

let e2 = { id: "test-2@mozilla.com" };
let c2 = { extension: e2, callOnClose() {} };
await kintoImpl.set(e1, { foo: "bar" }, c1);
await kintoImpl.set(e1, { baz: "quux" }, c1);
await kintoImpl.set(e2, { second: "2nd" }, c2);

Assert.deepEqual(await rustImpl.get(e1, "foo", c1), { foo: "bar" });
Assert.deepEqual(await rustImpl.get(e1, "baz", c1), { baz: "quux" });
Assert.deepEqual(await rustImpl.get(e2, null, c2), { second: "2nd" });

// Explicitly unregister first. It's very possible this isn't needed for this
// case, however it's fairly harmless, we hope to uplift this patch to beta,
// and earlier today we had beta-only problems caused by this (bug 1629116)
await Service.engineManager.unregister("extension-storage");
await Service.engineManager.register(ExtensionStorageEngineBridge);
let engine = Service.engineManager.get("extension-storage");
let server = await serverForFoo(engine, undefined);
try {
await SyncTestingInfrastructure(server);
await Service.engineManager.switchAlternatives();

_("First sync");
let ping = await sync_engine_and_validate_telem(engine, false, null, true);
Assert.deepEqual(ping.migrations, [
{
type: "webext-storage",
entries: 3,
entriesSuccessful: 3,
extensions: 2,
extensionsSuccessful: 2,
openFailure: false,
},
]);

// force another sync
await engine.setLastSync(0);
_("Second sync");

ping = await sync_engine_and_validate_telem(engine, false, null, true);
Assert.deepEqual(ping.migrations, undefined);
} finally {
await kintoImpl.clear(e1, c1);
await kintoImpl.clear(e2, c2);
await rustImpl.clear(e1, c1);
await rustImpl.clear(e2, c2);
await promiseStopServer(server);
await engine.finalize();
}
});
@@ -29,13 +29,13 @@ add_task(async function test_changing_extension_storage_changes_score() {
const tracker = engine._tracker;
const extension = { id: "my-extension-id" };
tracker.start();
await withContext(async function (context) {
await withSyncContext(async function (context) {
await extensionStorageSync.set(extension, { a: "b" }, context);
});
Assert.equal(tracker.score, SCORE_INCREMENT_MEDIUM);

tracker.resetScore();
await withContext(async function (context) {
await withSyncContext(async function (context) {
await extensionStorageSync.remove(extension, "a", context);
});
Assert.equal(tracker.score, SCORE_INCREMENT_MEDIUM);

@@ -121,6 +121,10 @@ run-sequentially = "extension-storage migration happens only once, and must be t
skip-if = ["appname == 'thunderbird'"]
run-sequentially = "extension-storage migration happens only once, and must be tested first."

["test_extension_storage_migration_telem.js"]
skip-if = ["appname == 'thunderbird'"]
run-sequentially = "extension-storage migration happens only once, and must be tested first."

["test_extension_storage_tracker_kinto.js"]
skip-if = ["appname == 'thunderbird'"]
@@ -1 +1 @@
{"files":{"Cargo.toml":"8663faef40fcdcb0058e42824fed421b728a144acadf2ca8a988739869d0f52b","src/lib.rs":"e923dc8a43c75665d99891aea581cab423814cba3c69ca3709b59e9a62fbe98c"},"package":null}
{"files":{"Cargo.toml":"8663faef40fcdcb0058e42824fed421b728a144acadf2ca8a988739869d0f52b","src/lib.rs":"67740f320fb8012c7df017ed2ad5ebe130940a3bb73d42e0445b31b39f844d79"},"package":null}
@@ -13,7 +13,7 @@ const ERR_MSG: &str = "Expected #[handle_error(path::to::Error)]";
///
/// Additionally, this procedural macro has side effects, including:
/// * It would log the error based on a pre-defined log level. The log level is defined
/// in the [`error_support::ErrorHandling`] implementation.
///    in the [`error_support::ErrorHandling`] implementation.
/// * It would report some errors using an external error reporter, in practice, this
/// is implemented using Sentry in the app.
///
@@ -24,7 +24,7 @@ const ERR_MSG: &str = "Expected #[handle_error(path::to::Error)]";
///#[derive(Debug, thiserror::Error)]
/// struct Error {}
/// type Result<T, E = Error> = std::result::Result<T, E>;
///

/// impl Display for Error {
/// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// write!(f, "Internal Error!")

@@ -1 +1 @@
{"files":{"Cargo.toml":"f94edc7d050656bb0e20fe2b2db8505642b79d89fb4057e60c31d98ea512e209","README.md":"69ccc6e378995b9d490d64e23b42ea1d7a9e3232e3dae6fabf1f955786a49931","build.rs":"c8d3c38c1208eea36224662b284d8daf3e7ad1b07d22d750524f3da1cc66ccca","src/errorsupport.udl":"8f8e5711913ffd1b515ec60028529768990df51001e6125d4b83c948b41c4466","src/handling.rs":"6e0568b18d426531cb2ae9967c8dd0d51ece5a065f68b15eeb308b995edaa167","src/lib.rs":"3fd547de9565a2d63af32b4dae0357aafc2216bde173251bf76dd0c6b121a30c","src/macros.rs":"0d03f82fab20c96a182f941baf3fcf2a286b00fea871ee7fd8e339abc14f9522","src/redact.rs":"c9a4df1a87be68b15d583587bda941d4c60a1d0449e2d43ff99f3611a290a863","src/reporting.rs":"b8e03402edf3111718fc9c2ec179622307f4a117db05ac220ead631c9de28362","uniffi.toml":"af91bcd8e7b1fa3f475a5e556979ff23c57b338395e0b65abc1cb1a0ee823e23"},"package":null}
{"files":{"Cargo.toml":"a9dd0810aae7f5ac7fa3c4d53f74d8af761cb4858cef2249d5c16af0fd976fd4","README.md":"99fb739e79beb2c2d34f38d502cd758a1470b3ecf22c8f7fb05b97f324918cf4","build.rs":"c8d3c38c1208eea36224662b284d8daf3e7ad1b07d22d750524f3da1cc66ccca","src/errorsupport.udl":"8f8e5711913ffd1b515ec60028529768990df51001e6125d4b83c948b41c4466","src/handling.rs":"6e0568b18d426531cb2ae9967c8dd0d51ece5a065f68b15eeb308b995edaa167","src/lib.rs":"96ae3cc2c1077ae45442ace6b5b5311b86267d0b9067f3ff58396af30ccbbc07","src/macros.rs":"0d03f82fab20c96a182f941baf3fcf2a286b00fea871ee7fd8e339abc14f9522","src/redact.rs":"c9a4df1a87be68b15d583587bda941d4c60a1d0449e2d43ff99f3611a290a863","src/reporting.rs":"b8e03402edf3111718fc9c2ec179622307f4a117db05ac220ead631c9de28362","uniffi.toml":"af91bcd8e7b1fa3f475a5e556979ff23c57b338395e0b65abc1cb1a0ee823e23"},"package":null}
third_party/rust/error-support/Cargo.toml (vendored)
@@ -50,6 +50,3 @@ version = "0.28.2"
[build-dependencies.uniffi]
version = "0.28.2"
features = ["build"]

[lints.clippy]
empty-line-after-doc-comments = "allow"
third_party/rust/error-support/README.md (vendored)
@@ -86,3 +86,4 @@ a user's database in their errors, which would then appear in our error
variants. However, we've never seen that in practice so we are comfortable
including the `rusqlite` error message in our error reports, without attempting
to sanitize them.
third_party/rust/error-support/src/lib.rs (vendored)
@@ -40,7 +40,7 @@ pub use handling::{convert_log_report_error, ErrorHandling, ErrorReporting, GetE

/// XXX - Most of this is now considered deprecated - only FxA uses it, and
/// should be replaced with the facilities in the `handling` module.
///

/// Define a wrapper around the provided ErrorKind type.
/// See also `define_error` which is more likely to be what you want.
#[macro_export]
@@ -1 +1 @@
{"files":{"Cargo.toml":"0a32ff972e8a1c74be5a4a028e630d56b8a07c266d4e32410c2eef3d0b161a33","src/compare.rs":"661d8beab68741564707496db1746d25195015c46aea89fc2dac59a0c8e6a698","src/error.rs":"812afbb14a012f3c2d8541c81a1c96bb31da4aba5b68384260ae9a3a2fc253c5","src/lib.rs":"dbe7e4c06ae58b45ae1f459fca70065ac2042935c6b9a213db95c11337f8c098","src/version.rs":"4e7955577478dfb4980b85a5bbb7004a9e633ba9c407137146fb272b3ffd671b","tests/test_versioning.rs":"756bb1e5dfc7a2747263755a21abdcac73cff79697fdf4fc49b61981b276d6c8"},"package":null}
{"files":{"Cargo.toml":"0a32ff972e8a1c74be5a4a028e630d56b8a07c266d4e32410c2eef3d0b161a33","src/compare.rs":"661d8beab68741564707496db1746d25195015c46aea89fc2dac59a0c8e6a698","src/error.rs":"812afbb14a012f3c2d8541c81a1c96bb31da4aba5b68384260ae9a3a2fc253c5","src/lib.rs":"dbe7e4c06ae58b45ae1f459fca70065ac2042935c6b9a213db95c11337f8c098","src/version.rs":"efe6f32ff191ac110c3b9b1bb0c1e17beeb0a265c819cff59ecb7afae7d3cc56","tests/test_versioning.rs":"756bb1e5dfc7a2747263755a21abdcac73cff79697fdf4fc49b61981b276d6c8"},"package":null}
@@ -233,7 +233,7 @@ impl TryFrom<&'_ str> for VersionPart {
type Error = VersionParsingError;

fn try_from(value: &'_ str) -> Result<Self, Self::Error> {
if !value.is_ascii() {
if value.chars().any(|c| !c.is_ascii()) {
return Err(VersionParsingError::ParseError(format!(
"version string {} contains non-ascii characters",
value
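
The two forms of the ASCII check swapped in this hunk are behaviorally equivalent for `&str`: a string is ASCII exactly when every `char` in it is ASCII. A quick self-contained check of that equivalence:

```rust
// Sanity check that `!value.is_ascii()` and
// `value.chars().any(|c| !c.is_ascii())` agree on &str inputs.
fn main() {
    for s in ["1.2.3", "1.2.β", "", "129.0a1"] {
        assert_eq!(!s.is_ascii(), s.chars().any(|c| !c.is_ascii()));
    }
}
```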
@@ -1 +1 @@
{"files":{"Cargo.toml":"6765e7643f3e8ad46145166225fa93a08e8a5eb327eca1460340b29c29cd73f5","src/bin/generate-test-data.rs":"7cc80b56929091d02675b9dd9bf4c657a95cda502656cf2ec8d91f56d7a393c7","src/db.rs":"d9dd44501ee3b19c696d8830d3036f7bfe0e8ad7751d5a057f5d8295ebf0bd4f","src/error.rs":"3a1308e65440769d9435fc95528d4ef42994c84d88e1da04ba058491dea387c4","src/ingest.rs":"9f8f7584be5ed27dc962d9137eaa4730948356c724f687e03048a8370c9ed889","src/interest.rs":"e4369a1280867438bca12746f71288a03b4d5e180e156f4bc0335046012565f7","src/lib.rs":"e02c30074fe645fd0ffabe8a2b9fd400e25905c9a8e11030f77524ff7582dd92","src/ranker.rs":"e71414fe79ade26f3c79dceb5211af4f37984a9cded8c938dc1da8d8d28c2ad3","src/rs.rs":"fb12d29f75a59af1bfdd320ad01f9bb5a03cf5a3f84738ebdaccb67b84695eef","src/schema.rs":"38ea82679da2729a571aad936f96469e732ec1c104d7c21fd869842f7a5f30a3","src/url_hash.rs":"2e908316fb70923644d1990dbf470d69ce2f5e99b0c5c3d95ec691590be8ffa5","test-data":"1ef2cd092d59e7e126cd4a514af983d449ed9f9c98708702fd237464a76c2b5e"},"package":null}
{"files":{"Cargo.toml":"6765e7643f3e8ad46145166225fa93a08e8a5eb327eca1460340b29c29cd73f5","src/bin/generate-test-data.rs":"7f1c9dc445418c7627f89d1f2aa8e550d0f85b3d1f05edb7c378ab9441714f1f","src/db.rs":"c3dc899e5c11c356f831ee318acb605f1a73607e879f04ef139e2c2258bf8655","src/error.rs":"3a1308e65440769d9435fc95528d4ef42994c84d88e1da04ba058491dea387c4","src/ingest.rs":"df2f09bc99d3bcc55bc21f47b6cac82ddcfb6b1063d9053d4e8de4c60cf08228","src/interest.rs":"e4369a1280867438bca12746f71288a03b4d5e180e156f4bc0335046012565f7","src/lib.rs":"1a355e9c0858664c4aa52bb4a144d55dd1de10568c9190899d34eda3c845f8cd","src/ranker.rs":"77763ef49cf1cdcc916e719602e03e6697e908005cfec5901c9a5459be650053","src/rs.rs":"fb12d29f75a59af1bfdd320ad01f9bb5a03cf5a3f84738ebdaccb67b84695eef","src/schema.rs":"38ea82679da2729a571aad936f96469e732ec1c104d7c21fd869842f7a5f30a3","src/url_hash.rs":"2e908316fb70923644d1990dbf470d69ce2f5e99b0c5c3d95ec691590be8ffa5","test-data":"1ef2cd092d59e7e126cd4a514af983d449ed9f9c98708702fd237464a76c2b5e"},"package":null}
@@ -1,7 +1,3 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use relevancy::{
url_hash::{hash_url, UrlHash},
Interest,
third_party/rust/relevancy/src/db.rs (vendored)
@@ -21,16 +21,6 @@ pub struct RelevancyDb {
writer: LazyDb<RelevancyConnectionInitializer>,
}

#[derive(Debug, PartialEq, uniffi::Record)]
pub struct BanditData {
pub bandit: String,
pub arm: String,
pub impressions: u64,
pub clicks: u64,
pub alpha: u64,
pub beta: u64,
}

impl RelevancyDb {
pub fn new(path: impl AsRef<Path>) -> Self {
// Note: use `SQLITE_OPEN_READ_WRITE` for both read and write connections.
@@ -207,7 +197,7 @@ impl<'a> RelevancyDao<'a> {
&self,
bandit: &str,
arm: &str,
) -> Result<(u64, u64)> {
) -> Result<(usize, usize)> {
let mut stmt = self
.conn
.prepare("SELECT alpha, beta FROM multi_armed_bandit WHERE bandit=? AND arm=?")?;
@@ -223,45 +213,6 @@ impl<'a> RelevancyDao<'a> {
}
}

/// Retrieves the data for a specific bandit and arm combination from the database.
///
/// This method queries the `multi_armed_bandit` table to find a row matching the given
/// `bandit` and `arm` values. If a matching row is found, it extracts the corresponding
/// fields (`bandit`, `arm`, `impressions`, `clicks`, `alpha`, `beta`) and returns them
/// as a `BanditData` struct. If no matching row is found, it returns a `BanditNotFound`
/// error.
pub fn retrieve_bandit_data(&self, bandit: &str, arm: &str) -> Result<BanditData> {
let mut stmt = self
.conn
.prepare("SELECT bandit, arm, impressions, clicks, alpha, beta FROM multi_armed_bandit WHERE bandit=? AND arm=?")?;

let mut result = stmt.query((&bandit, &arm))?;

match result.next()? {
Some(row) => {
let bandit = row.get::<_, String>(0)?;
let arm = row.get::<_, String>(1)?;
let impressions = row.get::<_, u64>(2)?;
let clicks = row.get::<_, u64>(3)?;
let alpha = row.get::<_, u64>(4)?;
let beta = row.get::<_, u64>(5)?;

Ok(BanditData {
bandit,
arm,
impressions,
clicks,
alpha,
beta,
})
}
None => Err(BanditNotFound {
bandit: bandit.to_string(),
arm: arm.to_string(),
}),
}
}

/// Updates the Beta distribution parameters and counters for a specific arm in a bandit model based on user interaction.
///
/// This method updates the `alpha` or `beta` parameters in the `multi_armed_bandit` table for the specified
@@ -567,56 +518,4 @@ mod test {

Ok(())
}

#[test]
fn test_retrieve_bandit_data() -> Result<()> {
let db = RelevancyDb::new_for_test();
let bandit = "provider".to_string();
let arm = "weather".to_string();

db.read_write(|dao| dao.initialize_multi_armed_bandit(&bandit, &arm))?;

// Update the bandit arm data (simulate interactions)
db.read_write(|dao| dao.update_bandit_arm_data(&bandit, &arm, true))?;
db.read_write(|dao| dao.update_bandit_arm_data(&bandit, &arm, false))?;
db.read_write(|dao| dao.update_bandit_arm_data(&bandit, &arm, false))?;

let bandit_data = db.read(|dao| dao.retrieve_bandit_data(&bandit, &arm))?;

let expected_bandit_data = BanditData {
bandit: bandit.clone(),
arm: arm.clone(),
impressions: 3, // 3 updates (true + false + false)
clicks: 1, // 1 `true` interaction
alpha: 2,
beta: 3,
};

assert_eq!(bandit_data, expected_bandit_data);

Ok(())
}
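
The expected counters above are consistent with a standard Beta-Bernoulli update in which both parameters start at 1, a positive interaction increments `alpha`, and a negative one increments `beta`. A minimal model of that bookkeeping (inferred from the test values; not code taken from the vendored crate):

```rust
// Assumed Beta-Bernoulli bookkeeping: alpha/beta start at 1; a selection
// bumps alpha, a rejection bumps beta; impressions count every update.
struct ArmStats { impressions: u64, clicks: u64, alpha: u64, beta: u64 }

impl ArmStats {
    fn new() -> Self { Self { impressions: 0, clicks: 0, alpha: 1, beta: 1 } }
    fn update(&mut self, selected: bool) {
        self.impressions += 1;
        if selected { self.clicks += 1; self.alpha += 1; } else { self.beta += 1; }
    }
}

fn main() {
    let mut arm = ArmStats::new();
    for outcome in [true, false, false] { arm.update(outcome); }
    // Matches the expected BanditData in the test above.
    assert_eq!((arm.impressions, arm.clicks, arm.alpha, arm.beta), (3, 1, 2, 3));
}
```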

#[test]
fn test_retrieve_bandit_data_not_found() -> Result<()> {
let db = RelevancyDb::new_for_test();
let bandit = "provider".to_string();
let arm = "weather".to_string();

let result = db.read(|dao| dao.retrieve_bandit_data(&bandit, &arm));

match result {
Ok(bandit_data) => panic!(
"Expected BanditNotFound error, but got Ok result with alpha: {}, beta: {}, impressions: {}, clicks: {}, bandit: {}, arm: {}",
bandit_data.alpha, bandit_data.beta, bandit_data.impressions, bandit_data.clicks, bandit_data.bandit, bandit_data.arm
),
Err(BanditNotFound { bandit: b, arm: a }) => {
assert_eq!(b, bandit);
assert_eq!(a, arm);
}
_ => {}
}

Ok(())
}
}
third_party/rust/relevancy/src/ingest.rs (vendored)
@@ -344,7 +344,7 @@ mod test {
}]))?
.with_data(
"data-1.json",
json!([
json!([
{"domain": "J2jtyjQtYQ/+/p//xhz43Q=="},
{"domain": "Zd4awCwGZLkat59nIWje3g=="}]),
)?;
@@ -383,7 +383,7 @@ mod test {
}]))?
.with_data(
"data-1.json",
json!([
json!([
{"domain": "badString"},
{"domain": "notBase64"}]),
)?;
third_party/rust/relevancy/src/lib.rs (vendored)
@@ -23,20 +23,15 @@ use rand_distr::{Beta, Distribution};
pub use db::RelevancyDb;
pub use error::{ApiResult, Error, RelevancyApiError, Result};
pub use interest::{Interest, InterestVector};
use parking_lot::Mutex;
pub use ranker::score;

use error_support::handle_error;

use db::BanditData;
use std::collections::HashMap;

uniffi::setup_scaffolding!();

#[derive(uniffi::Object)]
pub struct RelevancyStore {
db: RelevancyDb,
cache: Mutex<BanditCache>,
}

/// Top-level API for the Relevancy component
@@ -50,7 +45,6 @@ impl RelevancyStore {
pub fn new(db_path: String) -> Self {
Self {
db: RelevancyDb::new(db_path),
cache: Mutex::new(BanditCache::new()),
}
}

@@ -131,12 +125,15 @@ impl RelevancyStore {
/// of success. The arm with the highest sampled probability is selected and returned.
#[handle_error(Error)]
pub fn bandit_select(&self, bandit: String, arms: &[String]) -> ApiResult<String> {
let mut cache = self.cache.lock();
// we should cache the distribution so we don't retrieve each time

let mut best_sample = f64::MIN;
let mut selected_arm = String::new();

for arm in arms {
let (alpha, beta) = cache.get_beta_distribution(&bandit, arm, &self.db)?;
let (alpha, beta) = self
.db
.read(|dao| dao.retrieve_bandit_arm_beta_distribution(&bandit, arm))?;
// this creates a Beta distribution for an alpha & beta pair
let beta_dist = Beta::new(alpha as f64, beta as f64)
.expect("computing betas dist unexpectedly failed");
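
For reference, a standalone sketch of the Thompson Sampling step `bandit_select` performs, using the same `rand_distr` API this file imports; the hard-coded `(arm, alpha, beta)` triples stand in for values read from the database:

```rust
use rand_distr::{Beta, Distribution};

// Sample each arm's Beta(alpha, beta) once and keep the best draw.
fn select_arm(arms: &[(&str, u64, u64)]) -> String {
    let mut rng = rand::thread_rng();
    let mut best_sample = f64::MIN;
    let mut selected_arm = String::new();
    for &(arm, alpha, beta) in arms {
        let beta_dist = Beta::new(alpha as f64, beta as f64)
            .expect("computing beta dist unexpectedly failed");
        let sample = beta_dist.sample(&mut rng);
        if sample > best_sample {
            best_sample = sample;
            selected_arm = arm.to_string();
        }
    }
    selected_arm
}

fn main() {
    // A heavily-clicked arm should usually win the sampled draw.
    let arm = select_arm(&[("weather", 50, 2), ("wiki", 2, 50)]);
    println!("selected: {arm}");
}
```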
@@ -162,77 +159,10 @@ impl RelevancyStore {
/// its likelihood of a negative outcome.
#[handle_error(Error)]
pub fn bandit_update(&self, bandit: String, arm: String, selected: bool) -> ApiResult<()> {
let mut cache = self.cache.lock();

cache.clear(&bandit, &arm);

self.db
.read_write(|dao| dao.update_bandit_arm_data(&bandit, &arm, selected))?;

Ok(())
}

/// Retrieves the data for a specific bandit and arm.
#[handle_error(Error)]
pub fn get_bandit_data(&self, bandit: String, arm: String) -> ApiResult<BanditData> {
let bandit_data = self
.db
.read(|dao| dao.retrieve_bandit_data(&bandit, &arm))?;

Ok(bandit_data)
}
}

#[derive(Default)]
pub struct BanditCache {
cache: HashMap<(String, String), (u64, u64)>,
}

impl BanditCache {
/// Creates a new, empty `BanditCache`.
///
/// The cache is initialized as an empty `HashMap` and is used to store
/// precomputed Beta distribution parameters for faster access during
/// Thompson Sampling operations.
pub fn new() -> Self {
Self::default()
}

/// Retrieves the Beta distribution parameters for a given bandit and arm.
///
/// If the parameters for the specified `bandit` and `arm` are already cached,
/// they are returned directly. Otherwise, the parameters are fetched from
/// the database, added to the cache, and then returned.
pub fn get_beta_distribution(
&mut self,
bandit: &str,
arm: &str,
db: &RelevancyDb,
) -> Result<(u64, u64)> {
let key = (bandit.to_string(), arm.to_string());

// Check if the distribution is already cached
if let Some(&params) = self.cache.get(&key) {
return Ok(params);
}

let params = db.read(|dao| dao.retrieve_bandit_arm_beta_distribution(bandit, arm))?;

// Cache the retrieved parameters for future use
self.cache.insert(key, params);

Ok(params)
}

/// Clears the cached Beta distribution parameters for a given bandit and arm.
///
/// This removes the cached values for the specified `bandit` and `arm` from the cache.
/// Use this method if the cached parameters are no longer valid or need to be refreshed.
pub fn clear(&mut self, bandit: &str, arm: &str) {
let key = (bandit.to_string(), arm.to_string());

self.cache.remove(&key);
}
}

impl RelevancyStore {
@@ -409,44 +339,4 @@ mod test {
"Thompson Sampling did not favor the best-performing arm"
);
}

#[test]
fn test_get_bandit_data() {
let relevancy_store = setup_store("get_bandit_data");

let bandit = "provider".to_string();
let arm = "wiki".to_string();

// initialize bandit
relevancy_store
.bandit_init(
"provider".to_string(),
&["weather".to_string(), "fakespot".to_string(), arm.clone()],
)
.unwrap();

// update beta distribution for arm based on click/no click
relevancy_store
.bandit_update(bandit.clone(), arm.clone(), true)
.expect("Failed to update beta distribution for arm");

relevancy_store
.bandit_update(bandit.clone(), arm.clone(), true)
.expect("Failed to update beta distribution for arm");

let bandit_data = relevancy_store
.get_bandit_data(bandit.clone(), arm.clone())
.unwrap();

let expected_bandit_data = BanditData {
bandit: bandit.clone(),
arm: arm.clone(),
impressions: 2,
clicks: 2,
alpha: 3,
beta: 1,
};

assert_eq!(bandit_data, expected_bandit_data);
}
}
third_party/rust/relevancy/src/ranker.rs (vendored)
@@ -1,7 +1,3 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use std::cmp::max;

use crate::interest::{Interest, InterestVector};
@@ -12,11 +8,11 @@ use crate::interest::{Interest, InterestVector};
/// - The score ranges from 0.0 to 1.0
/// - The score is monotonically increasing for the accumulated interest count
///
/// # Params:
/// Params:
/// - `interest_vector`: a user interest vector that can be fetched via
/// `RelevancyStore::user_interest_vector()`.
/// - `content_categories`: a list of categories (interests) of the given content.
/// # Return:
/// Return:
/// - A score in the range [0, 1].
#[uniffi::export]
pub fn score(interest_vector: InterestVector, content_categories: Vec<Interest>) -> f64 {
@@ -1 +1 @@
{"files":{"Cargo.toml":"3cc9adcb565e54012fdc66822cbdd9a03fe2faf1a5de7f2812b7e88ad0f0a25d","dumps/main/attachments/regions/world":"00b308033d44f61612b962f572765d14a3999586d92fc8b9fff2217a1ae070e8","dumps/main/attachments/regions/world-buffered":"1d3ed6954fac2a5b31302f5d3e8186c5fa08a20239afc0643ca5dfbb4d8a86fc","dumps/main/attachments/regions/world-buffered.meta.json":"914a71376a152036aceccb6877e079fbb9e3373c6219f24f00dd30e901a72cce","dumps/main/attachments/regions/world.meta.json":"2a47d77834997b98e563265d299723e7f7fd64c8c7a5731afc722862333d6fbd","dumps/main/regions.json":"e8990158373f82d3f89fed5089cf29e4177cc85904479128728e05025e9a0c0c","dumps/main/search-telemetry-v2.json":"1e3f49571183aa5db30c0877f0b291adb26ea7b81c04b16eac4f0320e9134a34","src/cache.rs":"c6179802017b43885136e7d64004890cc13e8c2d4742e04073cf404b578f63db","src/client.rs":"2683e3cdf0ad54ce450b14b6c2bf574e5344ec1c770a9d77541b1e4e4aa1a395","src/config.rs":"0294f122c76570c3df1de876289a6e70d0b03b4e8221a34ed7b893a377e68be1","src/error.rs":"fd831d51ebe312b7da2459f63c86ad3492a6551c490b93330ba8fdee20be7668","src/jexl_filter.rs":"e085f92b0ef9031106cf5d4999dbb19f467494c029f324b25d0098506b37b2e1","src/lib.rs":"f51bd3a8f7e3ff612dd251db80b44ed44ce02ebf84393b1825dc4b927019e844","src/macros.rs":"19735d74b6ce8d1fc21a85c39787a271f09a849a310db16ba36219aba9106736","src/service.rs":"5445803fc9f768f6b40514bfb1add437baf6d868b67fc29a349dda70923c8b5f","src/storage.rs":"ae921a8a184c7bf37ebdf7b857c481b76ca6103d20b6c09f8bb326636b083520","uniffi.toml":"bd7cc0e7c1981f53938f429c4f2541ac454ed4160a8a0b4670659e38acd23ee5"},"package":null}
{"files":{"Cargo.toml":"d5dff72e0dfd21cc73d4867f26c0eb7ccbb0359bb489bf7f544231df630587bb","dumps/main/search-telemetry-v2.json":"1e3f49571183aa5db30c0877f0b291adb26ea7b81c04b16eac4f0320e9134a34","src/cache.rs":"c6179802017b43885136e7d64004890cc13e8c2d4742e04073cf404b578f63db","src/client.rs":"db588b7524ffc164c5870f93dfe8fb8d3168748af39d66405027b18c0ee7aca7","src/config.rs":"0294f122c76570c3df1de876289a6e70d0b03b4e8221a34ed7b893a377e68be1","src/error.rs":"f0cb9b7fa3e427c919b491f7864dd332365b2d0288be775064d115ef82311fc5","src/jexl_filter.rs":"e085f92b0ef9031106cf5d4999dbb19f467494c029f324b25d0098506b37b2e1","src/lib.rs":"16d9f3ca6336188df53410dcbe9afe6ac09a1be764ecd9094cfbec05512e7370","src/service.rs":"5445803fc9f768f6b40514bfb1add437baf6d868b67fc29a349dda70923c8b5f","src/storage.rs":"944c622c0ec7f32e0e9f6177abd4e01b7c6601f85b7c991f3752261119f87c84","uniffi.toml":"88cfb5e8e6ea16b0a3998f34791232a22478b2027ea4f1932dcc417b51fe2e7a"},"package":null}
third_party/rust/remote_settings/Cargo.toml (vendored)
@@ -42,9 +42,8 @@ log = "0.4"
parking_lot = "0.12"
regex = "1.9"
serde_json = "1"
sha2 = "^0.10"
thiserror = "1.0"
url = "2"
url = "2.1"

[dependencies.error-support]
path = "../support/error"
File diff suppressed because one or more lines are too long
@@ -1,5 +0,0 @@
{
"location": "main-workspace/regions/60d3f85e-beb1-4147-9b99-cfb7cb079a53.geojson",
"hash": "1d3ed6954fac2a5b31302f5d3e8186c5fa08a20239afc0643ca5dfbb4d8a86fc",
"size": 1108807
}
@@ -1,5 +0,0 @@
{
"location": "main-workspace/regions/ea0d6337-97e5-43e9-b708-90ec5bf96c15.geojson",
"hash": "00b308033d44f61612b962f572765d14a3999586d92fc8b9fff2217a1ae070e8",
"size": 1093343
}
@@ -1,29 +0,0 @@
{
"data": [
{
"attachment": {
"filename": "world.geojson",
"hash": "00b308033d44f61612b962f572765d14a3999586d92fc8b9fff2217a1ae070e8",
"location": "main-workspace/regions/ea0d6337-97e5-43e9-b708-90ec5bf96c15.geojson",
"mimetype": "application/octet-stream",
"size": 1093343
},
"id": "world",
"last_modified": 1600363002708,
"schema": 1591565378397
},
{
"attachment": {
"filename": "world-buffered.geojson",
"hash": "1d3ed6954fac2a5b31302f5d3e8186c5fa08a20239afc0643ca5dfbb4d8a86fc",
"location": "main-workspace/regions/60d3f85e-beb1-4147-9b99-cfb7cb079a53.geojson",
"mimetype": "application/octet-stream",
"size": 1108807
},
"id": "world-buffered",
"last_modified": 1600363002702,
"schema": 1591565382168
}
],
"timestamp": 1600363002708
}
third_party/rust/remote_settings/src/client.rs (vendored)
@@ -9,12 +9,9 @@ use crate::jexl_filter::JexlFilter;
use crate::storage::Storage;
#[cfg(feature = "jexl")]
use crate::RemoteSettingsContext;
use crate::{
packaged_attachments, packaged_collections, RemoteSettingsServer, UniffiCustomTypeConverter,
};
use crate::{RemoteSettingsServer, UniffiCustomTypeConverter};
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::{
borrow::Cow,
time::{Duration, Instant},
@@ -49,32 +46,6 @@ struct RemoteSettingsClientInner<C> {
api_client: C,
}

// Add your local packaged data you want to work with here
impl<C: ApiClient> RemoteSettingsClient<C> {
// One line per bucket + collection
packaged_collections! {
("main", "search-telemetry-v2"),
("main", "regions"),
}

// You have to specify
// - bucket + collection_name: ("main", "regions")
// - One line per file you want to add (e.g. "world")
//
// This will automatically also include the NAME.meta.json file
// for internal validation against hash and size
//
// The entries line up with the `Attachment::filename` field,
// and check for the folder + name in
// `remote_settings/dumps/{bucket}/attachments/{collection}/{filename}
packaged_attachments! {
("main", "regions") => [
"world",
"world-buffered",
],
}
}

impl<C: ApiClient> RemoteSettingsClient<C> {
pub fn new_from_parts(
collection_name: String,
@@ -97,15 +68,22 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
&self.collection_name
}

fn load_packaged_data(&self) -> Option<CollectionData> {
// Using the macro generated `get_packaged_data` in macros.rs
Self::get_packaged_data(&self.collection_name)
.and_then(|data| serde_json::from_str(data).ok())
fn get_packaged_data(collection_name: &str) -> Option<&'static str> {
match collection_name {
// Add entries for each locally dumped collection in the `dumps/` folder.
// This is also the place where we want to think about a macro! and feature-gating
// different platforms.
"search-telemetry-v2" => Some(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/dumps/main/search-telemetry-v2.json"
))),
_ => None,
}
}

fn load_packaged_attachment(&self, filename: &str) -> Option<(&'static [u8], &'static str)> {
// Using the macro generated `get_packaged_attachment` in macros.rs
Self::get_packaged_attachment(&self.collection_name, filename)
fn load_packaged_data(&self) -> Option<CollectionData> {
Self::get_packaged_data(&self.collection_name)
.and_then(|data| serde_json::from_str(data).ok())
}

/// Filters records based on the presence and evaluation of `filter_expression`.
@@ -134,6 +112,7 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
pub fn get_records(&self, sync_if_empty: bool) -> Result<Option<Vec<RemoteSettingsRecord>>> {
let mut inner = self.inner.lock();
let collection_url = inner.api_client.collection_url();
let cached_records = inner.storage.get_records(&collection_url)?;
let is_prod = inner.api_client.is_prod_server()?;
let packaged_data = if is_prod {
self.load_packaged_data()
@@ -141,16 +120,32 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
None
};

// Case 1: The packaged data is more recent than the cache
//
// This happens when there's no cached data or when we get new packaged data because of a
// product update
// Case 1: We have no cached records
if cached_records.is_none() {
// Case 1a: Use packaged data if available (prod only)
if let Some(collection) = packaged_data {
inner
.storage
.set_records(&collection_url, &collection.data)?;
return Ok(Some(self.filter_records(collection.data)));
}
// Case 1b: No packaged data - fetch from remote if sync_if_empty
if sync_if_empty {
let records = inner.api_client.get_records(None)?;
inner.storage.set_records(&collection_url, &records)?;
return Ok(Some(self.filter_records(records)));
}
return Ok(None);
}

// Now we know we have cached records
let cached_records = cached_records.unwrap();
let cached_timestamp = inner.storage.get_last_modified_timestamp(&collection_url)?;

// Case 2: We have packaged data and are in prod
if let Some(packaged_data) = packaged_data {
let cached_timestamp = inner
.storage
.get_last_modified_timestamp(&collection_url)?
.unwrap_or(0);
if packaged_data.timestamp > cached_timestamp {
if packaged_data.timestamp > cached_timestamp.unwrap_or(0) {
// Packaged data is newer
inner
.storage
.set_records(&collection_url, &packaged_data.data)?;
@@ -158,23 +153,17 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
}
}

let cached_records = inner.storage.get_records(&collection_url)?;
// Case 3: Return cached data if we have it and either:
// - it's not empty
// - or we're not allowed to sync
if !cached_records.is_empty() || !sync_if_empty {
return Ok(Some(self.filter_records(cached_records)));
}

Ok(match (cached_records, sync_if_empty) {
// Case 2: We have cached records
//
// Note: we should return these even if it's an empty list and `sync_if_empty=true`.
// The "if empty" part refers to the cache being empty, not the list.
(Some(cached_records), _) => Some(self.filter_records(cached_records)),
// Case 3: sync_if_empty=true
(None, true) => {
let records = inner.api_client.get_records(None)?;
inner.storage.set_records(&collection_url, &records)?;
Some(self.filter_records(records))
}
// Case 4: Nothing to return
(None, false) => None,
})
// Case 4: Cache is empty and we're allowed to sync
let records = inner.api_client.get_records(None)?;
inner.storage.set_records(&collection_url, &records)?;
Ok(Some(self.filter_records(records)))
}
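
The match-based variant in this hunk reduces to a small decision table. A simplified model, with `Vec<u32>` standing in for records and the filtering step omitted:

```rust
// Reduced model of the match-based get_records logic: a present cache always
// wins (even an empty list); otherwise fetch only when sync_if_empty is set.
fn get_records_model(
    cached: Option<Vec<u32>>,
    sync_if_empty: bool,
    fetch: impl FnOnce() -> Vec<u32>,
) -> Option<Vec<u32>> {
    match (cached, sync_if_empty) {
        (Some(records), _) => Some(records),
        (None, true) => Some(fetch()),
        (None, false) => None,
    }
}

fn main() {
    // An empty cached list is still returned: "if empty" refers to the cache.
    assert_eq!(get_records_model(Some(vec![]), true, || vec![1]), Some(vec![]));
    assert_eq!(get_records_model(None, true, || vec![1]), Some(vec![1]));
    assert_eq!(get_records_model(None, false, || vec![1]), None);
}
```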

pub fn sync(&self) -> Result<()> {
@@ -182,65 +171,16 @@ impl<C: ApiClient> RemoteSettingsClient<C> {
let collection_url = inner.api_client.collection_url();
let mtime = inner.storage.get_last_modified_timestamp(&collection_url)?;
let records = inner.api_client.get_records(mtime)?;
inner.storage.merge_records(&collection_url, &records)
inner.storage.set_records(&collection_url, &records)
}

/// Downloads an attachment from [attachment_location]. NOTE: there are no guarantees about a
/// maximum size, so use care when fetching potentially large attachments.
pub fn get_attachment(&self, record: RemoteSettingsRecord) -> Result<Vec<u8>> {
let metadata = record
.attachment
.ok_or_else(|| Error::RecordAttachmentMismatchError("No attachment metadata".into()))?;

let mut inner = self.inner.lock();
let collection_url = inner.api_client.collection_url();

// First try storage - it will only return data that matches our metadata
if let Some(data) = inner
.storage
.get_attachment(&collection_url, metadata.clone())?
{
return Ok(data);
}

// Then try packaged data if we're in prod
if inner.api_client.is_prod_server()? {
if let Some((data, manifest)) = self.load_packaged_attachment(&metadata.location) {
if let Ok(manifest_data) = serde_json::from_str::<serde_json::Value>(manifest) {
if metadata.hash == manifest_data["hash"].as_str().unwrap_or_default()
&& metadata.size == manifest_data["size"].as_u64().unwrap_or_default()
{
// Store valid packaged data in storage because it was either empty or outdated
inner
.storage
.set_attachment(&collection_url, &metadata.location, data)?;
return Ok(data.to_vec());
}
}
}
}

// Try to download the attachment because neither the storage nor the local data had it
let attachment = inner.api_client.get_attachment(&metadata.location)?;

// Verify downloaded data
if attachment.len() as u64 != metadata.size {
return Err(Error::RecordAttachmentMismatchError(
"Downloaded attachment size mismatch".into(),
));
}
let hash = format!("{:x}", Sha256::digest(&attachment));
if hash != metadata.hash {
return Err(Error::RecordAttachmentMismatchError(
"Downloaded attachment hash mismatch".into(),
));
}

// Store verified download in storage
inner
.storage
.set_attachment(&collection_url, &metadata.location, &attachment)?;
Ok(attachment)
pub fn get_attachment(&self, attachment_location: &str) -> Result<Vec<u8>> {
self.inner
.lock()
.api_client
.get_attachment(attachment_location)
}
}
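
The size/hash verification removed by this hunk is easy to exercise in isolation with the same `sha2` API; `expected_size` and `expected_hash` stand in for the `Attachment` metadata fields:

```rust
use sha2::{Digest, Sha256};

// Returns true when the payload matches both the expected byte length and
// the lowercase-hex SHA-256 digest recorded in the attachment metadata.
fn verify_attachment(data: &[u8], expected_size: u64, expected_hash: &str) -> bool {
    data.len() as u64 == expected_size
        && format!("{:x}", Sha256::digest(data)) == expected_hash
}

fn main() {
    let data = b"some attachment bytes";
    let hash = format!("{:x}", Sha256::digest(data));
    assert!(verify_attachment(data, data.len() as u64, &hash));
    assert!(!verify_attachment(data, 0, &hash)); // size mismatch rejected
}
```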

@@ -726,7 +666,7 @@ pub struct RemoteSettingsRecord {

/// Attachment metadata that can be optionally attached to a [Record]. The [location] should
/// be included in calls to [Client::get_attachment].
#[derive(Clone, Debug, Default, Deserialize, Serialize, Eq, PartialEq, uniffi::Record)]
#[derive(Clone, Debug, Deserialize, Serialize, Eq, PartialEq, uniffi::Record)]
pub struct Attachment {
pub filename: String,
pub mimetype: String,
@@ -2100,134 +2040,52 @@ mod cached_data_tests {

Ok(())
}
}

#[cfg(not(feature = "jexl"))]
#[cfg(test)]
mod test_packaged_metadata {
use super::*;
use std::path::PathBuf;

#[test]
fn test_no_cached_data_use_packaged_attachment() -> Result<()> {
let collection_name = "regions";
let attachment_name = "world";

// Verify our packaged attachment exists with its manifest
let base_path = PathBuf::from(env!("CARGO_MANIFEST_DIR"))
.join("dumps")
.join("main")
.join("attachments")
.join(collection_name);

let file_path = base_path.join(attachment_name);
let manifest_path = base_path.join(format!("{}.meta.json", attachment_name));

assert!(
file_path.exists(),
"Packaged attachment should exist for this test"
);
assert!(
manifest_path.exists(),
"Manifest file should exist for this test"
);

let manifest_content = std::fs::read_to_string(manifest_path)?;
let manifest: serde_json::Value = serde_json::from_str(&manifest_content)?;

fn test_cached_data_empty_sync_if_empty_true() -> Result<()> {
let collection_name = "test-collection";
let mut api_client = MockApiClient::new();
let storage = Storage::new(":memory:".into())?;
let mut storage = Storage::new(":memory:".into())?;

let collection_url = format!(
"https://firefox.settings.services.mozilla.com/v1/buckets/main/collections/{}",
collection_name
);

// Mock get_records to return some data
let expected_records = vec![RemoteSettingsRecord {
id: "remote1".to_string(),
last_modified: 1000,
deleted: false,
attachment: None,
fields: serde_json::Map::new(),
}];
api_client
.expect_get_records()
.withf(|timestamp| timestamp.is_none())
.returning(move |_| Ok(expected_records.clone()));
api_client.expect_is_prod_server().returning(|| Ok(true));

// Set up empty cached records
let cached_records: Vec<RemoteSettingsRecord> = vec![];
storage.set_records(&collection_url, &cached_records)?;

api_client
.expect_collection_url()
.returning(move || collection_url.clone());
api_client.expect_is_prod_server().returning(|| Ok(true));

let rs_client =
RemoteSettingsClient::new_from_parts(collection_name.to_string(), storage, api_client);

// Create record with metadata from manifest
let attachment_metadata = Attachment {
filename: attachment_name.to_string(),
mimetype: "application/octet-stream".to_string(),
location: attachment_name.to_string(),
size: manifest["size"].as_u64().unwrap(),
hash: manifest["hash"].as_str().unwrap().to_string(),
};

let record = RemoteSettingsRecord {
id: "test-record".to_string(),
last_modified: 12345,
deleted: false,
attachment: Some(attachment_metadata),
fields: serde_json::json!({}).as_object().unwrap().clone(),
};

let attachment_data = rs_client.get_attachment(record)?;

// Verify we got the expected data
let expected_data = std::fs::read(file_path)?;
assert_eq!(attachment_data, expected_data);

Ok(())
}

#[test]
fn test_packaged_attachment_outdated_fetch_from_api() -> Result<()> {
let collection_name = "regions";
let attachment_name = "world";

let mut api_client = MockApiClient::new();
let storage = Storage::new(":memory:".into())?;

let collection_url = format!(
"https://firefox.settings.services.mozilla.com/v1/buckets/main/collections/{}",
collection_name
// Call get_records with sync_if_empty = true
let records = rs_client.get_records(true)?;
assert!(
records.is_some(),
"Records should be fetched from the remote server"
);

// Prepare mock data
let mock_api_data = vec![1, 2, 3, 4, 5];

// Create metadata that doesn't match our packaged data
let attachment_metadata = Attachment {
filename: attachment_name.to_string(),
mimetype: "application/octet-stream".to_string(),
location: attachment_name.to_string(),
size: mock_api_data.len() as u64,
hash: {
use sha2::{Digest, Sha256};
format!("{:x}", Sha256::digest(&mock_api_data))
},
};

api_client
.expect_collection_url()
.returning(move || collection_url.clone());
api_client.expect_is_prod_server().returning(|| Ok(true));
api_client
.expect_get_attachment()
.returning(move |_| Ok(mock_api_data.clone()));

let rs_client =
RemoteSettingsClient::new_from_parts(collection_name.to_string(), storage, api_client);

let record = RemoteSettingsRecord {
id: "test-record".to_string(),
last_modified: 12345,
deleted: false,
attachment: Some(attachment_metadata),
fields: serde_json::json!({}).as_object().unwrap().clone(),
};

let attachment_data = rs_client.get_attachment(record)?;

// Verify we got the mock API data, not the packaged data
assert_eq!(attachment_data, vec![1, 2, 3, 4, 5]);
let records = records.unwrap();
assert_eq!(records.len(), 1);
assert_eq!(records[0].id, "remote1");

Ok(())
}

@@ -47,8 +47,6 @@ pub enum Error {
ConfigError(String),
#[error("Database error: {0}")]
DatabaseError(#[from] rusqlite::Error),
#[error("No attachment in given record: {0}")]
RecordAttachmentMismatchError(String),
}

// Define how our internal errors are handled and converted to external errors
third_party/rust/remote_settings/src/lib.rs (vendored)
@@ -18,7 +18,6 @@ pub mod storage;

#[cfg(feature = "jexl")]
pub(crate) mod jexl_filter;
mod macros;

pub use client::{Attachment, RemoteSettingsRecord, RemoteSettingsResponse, RsJsonObject};
pub use config::{RemoteSettingsConfig, RemoteSettingsConfig2, RemoteSettingsServer};
@@ -156,6 +155,8 @@ impl RemoteSettingsClient {
/// this is that there is not much an application can do in this situation other than fall back
/// to the same default handling as if records have not been synced.
///
/// TODO(Bug 1919141):
///
/// Application-services schedules regular dumps of the server data for specific collections.
/// For these collections, `get_records` will never return None. If you would like to add your
/// collection to this list, please reach out to the DISCO team.
@@ -197,8 +198,8 @@ impl RemoteSettingsClient {
/// - This method will throw if there is a network or other error when fetching the
/// attachment data.
#[handle_error(Error)]
pub fn get_attachment(&self, record: RemoteSettingsRecord) -> ApiResult<Vec<u8>> {
self.internal.get_attachment(record)
pub fn get_attachment(&self, attachment_id: String) -> ApiResult<Vec<u8>> {
self.internal.get_attachment(&attachment_id)
}
}
third_party/rust/remote_settings/src/macros.rs (vendored)
@@ -1,57 +0,0 @@
#[macro_export]
macro_rules! packaged_collections {
($(($bucket:expr, $collection:expr)),* $(,)?) => {
fn get_packaged_data(collection_name: &str) -> Option<&'static str> {
match collection_name {
$($collection => Some(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/dumps/",
$bucket,
"/",
$collection,
".json"
))),)*
_ => None,
}
}
};
}

#[macro_export]
macro_rules! packaged_attachments {
() => {
fn get_packaged_attachment(collection_name: &str, filename: &str) -> Option<(&'static [u8], &'static str)> {
None
}
};
($(($bucket:expr, $collection:expr) => [$($filename:expr),* $(,)?]),* $(,)?) => {
fn get_packaged_attachment(collection_name: &str, filename: &str) -> Option<(&'static [u8], &'static str)> {
match (collection_name, filename) {
$($(
($collection, $filename) => Some((
include_bytes!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/dumps/",
$bucket,
"/attachments/",
$collection,
"/",
$filename
)),
include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/dumps/",
$bucket,
"/attachments/",
$collection,
"/",
$filename,
".meta.json"
))
)),
)*)*
_ => None,
}
}
};
}
third_party/rust/remote_settings/src/storage.rs (vendored)
@@ -4,9 +4,8 @@

use crate::{Attachment, RemoteSettingsRecord, Result};
use camino::Utf8PathBuf;
use rusqlite::{params, Connection, OptionalExtension, Transaction};
use rusqlite::{params, Connection, OptionalExtension};
use serde_json;
use sha2::{Digest, Sha256};

/// Internal storage type
///
@@ -43,7 +42,7 @@ impl Storage {
collection_url TEXT NOT NULL,
data BLOB NOT NULL
);
CREATE TABLE IF NOT EXISTS attachments (
CREATE TABLE IF NOT EXISTS attachments (
id TEXT PRIMARY KEY,
collection_url TEXT NOT NULL,
data BLOB NOT NULL
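
A quick, self-contained round trip over the `attachments` schema above, using the same rusqlite calls this module imports; the id and collection URL are made-up values:

```rust
use rusqlite::{params, Connection, OptionalExtension};

fn main() -> rusqlite::Result<()> {
    // In-memory database with the attachments table from the schema above.
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS attachments (
             id TEXT PRIMARY KEY,
             collection_url TEXT NOT NULL,
             data BLOB NOT NULL
         );",
    )?;
    // Upsert a blob keyed on (id), then read it back by id + collection URL.
    conn.execute(
        "INSERT OR REPLACE INTO attachments (id, collection_url, data) VALUES (?, ?, ?)",
        params!["attachment1", "https://example.com/api", vec![0x18u8, 0x64]],
    )?;
    let data: Option<Vec<u8>> = conn
        .query_row(
            "SELECT data FROM attachments WHERE id = ? AND collection_url = ?",
            params!["attachment1", "https://example.com/api"],
            |row| row.get(0),
        )
        .optional()?;
    assert_eq!(data, Some(vec![0x18, 0x64]));
    Ok(())
}
```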
@@ -61,7 +60,7 @@ impl Storage {
/// Get the last modified timestamp for the stored records
///
/// Returns None if no records are stored or if `collection_url` does not match the
/// last `collection_url` passed to `set_records` / `merge_records`
/// `collection_url` passed to `set_records`.
pub fn get_last_modified_timestamp(&self, collection_url: &str) -> Result<Option<u64>> {
let mut stmt = self
.conn
@@ -114,27 +113,17 @@ impl Storage {
pub fn get_attachment(
&self,
collection_url: &str,
metadata: Attachment,
) -> Result<Option<Vec<u8>>> {
attachment_id: &str,
) -> Result<Option<Attachment>> {
let mut stmt = self
.conn
.prepare("SELECT data FROM attachments WHERE id = ? AND collection_url = ?")?;

if let Some(data) = stmt
.query_row((metadata.location, collection_url), |row| {
row.get::<_, Vec<u8>>(0)
})
.optional()?
{
// Return None if data doesn't match expected metadata
if data.len() as u64 != metadata.size {
return Ok(None);
}
let hash = format!("{:x}", Sha256::digest(&data));
if hash != metadata.hash {
return Ok(None);
}
Ok(Some(data))
let result: Option<Vec<u8>> = stmt
.query_row((attachment_id, collection_url), |row| row.get(0))
.optional()?;
if let Some(data) = result {
let attachment: Attachment = serde_json::from_slice(&data)?;
Ok(Some(attachment))
} else {
Ok(None)
}
@@ -144,87 +133,36 @@ impl Storage {
pub fn set_records(
&mut self,
collection_url: &str,
records: &[RemoteSettingsRecord],
records: &Vec<RemoteSettingsRecord>,
) -> Result<()> {
let tx = self.conn.transaction()?;

// Delete ALL existing records and metadata for every collection_url
tx.execute("DELETE FROM records", [])?;
tx.execute("DELETE FROM collection_metadata", [])?;
let max_last_modified = Self::update_record_rows(&tx, collection_url, records)?;
Self::update_collection_metadata(&tx, collection_url, max_last_modified)?;
tx.commit()?;
Ok(())
}

/// Merge new records with records stored in the database
///
/// Records with `deleted=false` will be inserted into the DB, replacing any previously stored
/// records with the same ID. Records with `deleted=true` will be removed.
pub fn merge_records(
&mut self,
collection_url: &str,
records: &[RemoteSettingsRecord],
) -> Result<()> {
let tx = self.conn.transaction()?;

// Delete ALL existing records and metadata with different collection_urls.
//
// This way, if a user (probably QA) switches the remote settings server in the middle of a
// browser session, we'll delete the stale data from the previous server.
tx.execute(
"DELETE FROM records where collection_url <> ?",
[collection_url],
)?;
tx.execute(
"DELETE FROM collection_metadata where collection_url <> ?",
[collection_url],
)?;
let max_last_modified = Self::update_record_rows(&tx, collection_url, records)?;
Self::update_collection_metadata(&tx, collection_url, max_last_modified)?;
tx.commit()?;
Ok(())
}

/// Insert/remove/update rows in the records table based on a records list
///
/// Returns the max last modified record from the list
fn update_record_rows(
tx: &Transaction<'_>,
collection_url: &str,
records: &[RemoteSettingsRecord],
) -> Result<u64> {
// Find the max last_modified time while inserting records
let mut max_last_modified = 0;
{
let mut insert_stmt = tx.prepare(
"INSERT OR REPLACE INTO records (id, collection_url, data) VALUES (?, ?, ?)",
)?;
let mut delete_stmt = tx.prepare("DELETE FROM records WHERE id=?")?;
let mut stmt =
tx.prepare("INSERT INTO records (id, collection_url, data) VALUES (?, ?, ?)")?;
for record in records {
if record.deleted {
delete_stmt.execute(params![&record.id])?;
} else {
max_last_modified = max_last_modified.max(record.last_modified);
let data = serde_json::to_vec(&record)?;
insert_stmt.execute(params![record.id, collection_url, data])?;
}
max_last_modified = max_last_modified.max(record.last_modified);
let data = serde_json::to_vec(record)?;

stmt.execute(params![record.id, collection_url, data])?;
}
}
Ok(max_last_modified)
}
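
The delete-or-upsert form of the loop above reduces to: tombstoned records are removed, live records are upserted, and the max `last_modified` among live records is returned. A reduced model with a `HashMap` standing in for the records table:

```rust
use std::collections::HashMap;

#[derive(Clone)]
struct Rec {
    id: String,
    last_modified: u64,
    deleted: bool,
}

// Tombstones delete, live records upsert; returns the max last_modified seen.
fn update_record_rows_model(store: &mut HashMap<String, Rec>, records: &[Rec]) -> u64 {
    let mut max_last_modified = 0;
    for record in records {
        if record.deleted {
            store.remove(&record.id);
        } else {
            max_last_modified = max_last_modified.max(record.last_modified);
            store.insert(record.id.clone(), record.clone());
        }
    }
    max_last_modified
}

fn main() {
    let mut store = HashMap::new();
    let recs = vec![
        Rec { id: "a".into(), last_modified: 10, deleted: false },
        Rec { id: "b".into(), last_modified: 20, deleted: true },
    ];
    // The tombstone's timestamp does not count toward the max.
    assert_eq!(update_record_rows_model(&mut store, &recs), 10);
    assert!(store.contains_key("a") && !store.contains_key("b"));
}
```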
|
||||
/// Update the collection metadata after setting/merging records
|
||||
fn update_collection_metadata(
|
||||
tx: &Transaction<'_>,
|
||||
collection_url: &str,
|
||||
last_modified: u64,
|
||||
) -> Result<()> {
|
||||
// Update the metadata
|
||||
let fetched = true;
|
||||
tx.execute(
|
||||
"INSERT OR REPLACE INTO collection_metadata (collection_url, last_modified, fetched) VALUES (?, ?, ?)",
|
||||
(collection_url, last_modified, fetched),
|
||||
(collection_url, max_last_modified, fetched),
|
||||
)?;
|
||||
|
||||
tx.commit()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -232,8 +170,8 @@ impl Storage {
    pub fn set_attachment(
        &mut self,
        collection_url: &str,
        location: &str,
        attachment: &[u8],
        attachment_id: &str,
        attachment: Attachment,
    ) -> Result<()> {
        let tx = self.conn.transaction()?;

@@ -243,11 +181,11 @@ impl Storage {
            params![collection_url],
        )?;

        let data = serde_json::to_vec(&attachment)?;

        tx.execute(
            "INSERT OR REPLACE INTO ATTACHMENTS \
             (id, collection_url, data) \
             VALUES (?, ?, ?)",
            params![location, collection_url, attachment,],
            "INSERT OR REPLACE INTO attachments (id, collection_url, data) VALUES (?, ?, ?)",
            params![attachment_id, collection_url, data],
        )?;

        tx.commit()?;

@@ -271,8 +209,7 @@ impl Storage {
#[cfg(test)]
mod tests {
    use super::Storage;
    use crate::{Attachment, RemoteSettingsRecord, Result, RsJsonObject};
    use sha2::{Digest, Sha256};
    use crate::{Attachment, RemoteSettingsRecord, Result};

    #[test]
    fn test_storage_set_and_get_records() -> Result<()> {

@@ -362,21 +299,21 @@ mod tests {
    fn test_storage_set_and_get_attachment() -> Result<()> {
        let mut storage = Storage::new(":memory:".into())?;

        let attachment = &[0x18, 0x64];
        let collection_url = "https://example.com/api";
        let attachment_metadata = Attachment {
        let attachment_id = "attachment1";
        let attachment = Attachment {
            filename: "abc".to_string(),
            mimetype: "application/json".to_string(),
            location: "tmp".to_string(),
            hash: format!("{:x}", Sha256::digest(attachment)),
            size: attachment.len() as u64,
            hash: "abc123".to_string(),
            size: 1024,
        };

        // Store attachment
        storage.set_attachment(collection_url, &attachment_metadata.location, attachment)?;
        storage.set_attachment(collection_url, attachment_id, attachment.clone())?;

        // Get attachment
        let fetched_attachment = storage.get_attachment(collection_url, attachment_metadata)?;
        let fetched_attachment = storage.get_attachment(collection_url, attachment_id)?;
        assert!(fetched_attachment.is_some());
        let fetched_attachment = fetched_attachment.unwrap();
        assert_eq!(fetched_attachment, attachment);
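Both set_attachment and get_attachment round-trip the metadata through serde_json, which is what the test above exercises end to end. A self-contained sketch of just that round trip (the struct below mirrors the test's field names but is not the crate's exact Attachment definition):

use serde::{Deserialize, Serialize};

// Hypothetical mirror of the Attachment metadata used in the tests above.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
struct Attachment {
    filename: String,
    mimetype: String,
    location: String,
    hash: String,
    size: u64,
}

fn main() -> serde_json::Result<()> {
    let attachment = Attachment {
        filename: "abc".to_string(),
        mimetype: "application/json".to_string(),
        location: "tmp".to_string(),
        hash: "abc123".to_string(),
        size: 1024,
    };
    // set_attachment stores the metadata as a JSON blob in SQLite...
    let data = serde_json::to_vec(&attachment)?;
    // ...and get_attachment deserializes that same blob back.
    let round_tripped: Attachment = serde_json::from_slice(&data)?;
    assert_eq!(round_tripped, attachment);
    Ok(())
}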
@@ -389,42 +326,30 @@ mod tests {
        let mut storage = Storage::new(":memory:".into())?;

        let collection_url = "https://example.com/api";

        let attachment_1 = &[0x18, 0x64];
        let attachment_2 = &[0x12, 0x48];

        let attachment_metadata_1 = Attachment {
        let attachment_id = "attachment1";
        let attachment_1 = Attachment {
            filename: "abc".to_string(),
            mimetype: "application/json".to_string(),
            location: "tmp".to_string(),
            hash: format!("{:x}", Sha256::digest(attachment_1)),
            size: attachment_1.len() as u64,
            hash: "abc123".to_string(),
            size: 1024,
        };

        let attachment_metadata_2 = Attachment {
        let attachment_2 = Attachment {
            filename: "def".to_string(),
            mimetype: "application/json".to_string(),
            location: "tmp".to_string(),
            hash: format!("{:x}", Sha256::digest(attachment_2)),
            size: attachment_2.len() as u64,
            hash: "def456".to_string(),
            size: 2048,
        };

        // Store first attachment
        storage.set_attachment(
            collection_url,
            &attachment_metadata_1.location,
            attachment_1,
        )?;
        storage.set_attachment(collection_url, attachment_id, attachment_1.clone())?;

        // Replace attachment with new data
        storage.set_attachment(
            collection_url,
            &attachment_metadata_2.location,
            attachment_2,
        )?;
        storage.set_attachment(collection_url, attachment_id, attachment_2.clone())?;

        // Get attachment
        let fetched_attachment = storage.get_attachment(collection_url, attachment_metadata_2)?;
        let fetched_attachment = storage.get_attachment(collection_url, attachment_id)?;
        assert!(fetched_attachment.is_some());
        let fetched_attachment = fetched_attachment.unwrap();
        assert_eq!(fetched_attachment, attachment_2);

@@ -438,45 +363,32 @@ mod tests {

        let collection_url_1 = "https://example.com/api1";
        let collection_url_2 = "https://example.com/api2";

        let attachment_1 = &[0x18, 0x64];
        let attachment_2 = &[0x12, 0x48];

        let attachment_metadata_1 = Attachment {
        let attachment_id_1 = "attachment1";
        let attachment_id_2 = "attachment2";
        let attachment_1 = Attachment {
            filename: "abc".to_string(),
            mimetype: "application/json".to_string(),
            location: "first_tmp".to_string(),
            hash: format!("{:x}", Sha256::digest(attachment_1)),
            size: attachment_1.len() as u64,
            location: "tmp".to_string(),
            hash: "abc123".to_string(),
            size: 1024,
        };

        let attachment_metadata_2 = Attachment {
        let attachment_2 = Attachment {
            filename: "def".to_string(),
            mimetype: "application/json".to_string(),
            location: "second_tmp".to_string(),
            hash: format!("{:x}", Sha256::digest(attachment_2)),
            size: attachment_2.len() as u64,
            location: "tmp".to_string(),
            hash: "def456".to_string(),
            size: 2048,
        };

        // Set attachments for two different collections
        storage.set_attachment(
            collection_url_1,
            &attachment_metadata_1.location,
            attachment_1,
        )?;
        storage.set_attachment(
            collection_url_2,
            &attachment_metadata_2.location,
            attachment_2,
        )?;
        storage.set_attachment(collection_url_1, attachment_id_1, attachment_1.clone())?;
        storage.set_attachment(collection_url_2, attachment_id_2, attachment_2.clone())?;

        // Verify that only the attachment for the second collection remains
        let fetched_attachment_1 =
            storage.get_attachment(collection_url_1, attachment_metadata_1)?;
        let fetched_attachment_1 = storage.get_attachment(collection_url_1, attachment_id_1)?;
        assert!(fetched_attachment_1.is_none());

        let fetched_attachment_2 =
            storage.get_attachment(collection_url_2, attachment_metadata_2)?;
        let fetched_attachment_2 = storage.get_attachment(collection_url_2, attachment_id_2)?;
        assert!(fetched_attachment_2.is_some());
        let fetched_attachment_2 = fetched_attachment_2.unwrap();
        assert_eq!(fetched_attachment_2, attachment_2);

@@ -489,10 +401,10 @@ mod tests {
        let storage = Storage::new(":memory:".into())?;

        let collection_url = "https://example.com/api";
        let metadata = Attachment::default();
        let attachment_id = "nonexistent";

        // Get attachment that doesn't exist
        let fetched_attachment = storage.get_attachment(collection_url, metadata)?;
        let fetched_attachment = storage.get_attachment(collection_url, attachment_id)?;
        assert!(fetched_attachment.is_none());

        Ok(())

@@ -503,8 +415,6 @@ mod tests {
        let mut storage = Storage::new(":memory:".into())?;

        let collection_url = "https://example.com/api";
        let attachment = &[0x18, 0x64];

        let records = vec![
            RemoteSettingsRecord {
                id: "1".to_string(),

@@ -520,33 +430,30 @@ mod tests {
                id: "2".to_string(),
                last_modified: 200,
                deleted: false,
                attachment: Some(Attachment {
                    filename: "abc".to_string(),
                    mimetype: "application/json".to_string(),
                    location: "tmp".to_string(),
                    hash: format!("{:x}", Sha256::digest(attachment)),
                    size: attachment.len() as u64,
                }),
                attachment: None,
                fields: serde_json::json!({"key": "value2"})
                    .as_object()
                    .unwrap()
                    .clone(),
            },
        ];

        let metadata = records[1]
            .clone()
            .attachment
            .expect("No attachment metadata for record");
        let attachment_id = "attachment1";
        let attachment = Attachment {
            filename: "abc".to_string(),
            mimetype: "application/json".to_string(),
            location: "tmp".to_string(),
            hash: "abc123".to_string(),
            size: 1024,
        };

        // Set records and attachment
        storage.set_records(collection_url, &records)?;
        storage.set_attachment(collection_url, &metadata.location, attachment)?;
        storage.set_attachment(collection_url, attachment_id, attachment.clone())?;

        // Verify they are stored
        let fetched_records = storage.get_records(collection_url)?;
        assert!(fetched_records.is_some());
        let fetched_attachment = storage.get_attachment(collection_url, metadata.clone())?;
        let fetched_attachment = storage.get_attachment(collection_url, attachment_id)?;
        assert!(fetched_attachment.is_some());

        // Empty the storage

@@ -555,7 +462,7 @@ mod tests {
        // Verify they are deleted
        let fetched_records = storage.get_records(collection_url)?;
        assert!(fetched_records.is_none());
        let fetched_attachment = storage.get_attachment(collection_url, metadata)?;
        let fetched_attachment = storage.get_attachment(collection_url, attachment_id)?;
        assert!(fetched_attachment.is_none());

        Ok(())

@@ -622,7 +529,7 @@ mod tests {
    }

    #[test]
    fn test_storage_set_records() -> Result<()> {
    fn test_storage_update_records() -> Result<()> {
        let mut storage = Storage::new(":memory:".into())?;

        let collection_url = "https://example.com/api";

@@ -669,114 +576,4 @@ mod tests {

        Ok(())
    }

    // Quick way to generate the fields data for our mock records
    fn test_fields(data: &str) -> RsJsonObject {
        let mut map = serde_json::Map::new();
        map.insert("data".into(), data.into());
        map
    }

    #[test]
    fn test_storage_merge_records() -> Result<()> {
        let mut storage = Storage::new(":memory:".into())?;

        let collection_url = "https://example.com/api";

        let initial_records = vec![
            RemoteSettingsRecord {
                id: "a".into(),
                last_modified: 100,
                deleted: false,
                attachment: None,
                fields: test_fields("a"),
            },
            RemoteSettingsRecord {
                id: "b".into(),
                last_modified: 200,
                deleted: false,
                attachment: None,
                fields: test_fields("b"),
            },
            RemoteSettingsRecord {
                id: "c".into(),
                last_modified: 300,
                deleted: false,
                attachment: None,
                fields: test_fields("c"),
            },
        ];
        let updated_records = vec![
            // d is new
            RemoteSettingsRecord {
                id: "d".into(),
                last_modified: 1300,
                deleted: false,
                attachment: None,
                fields: test_fields("d"),
            },
            // b was deleted
            RemoteSettingsRecord {
                id: "b".into(),
                last_modified: 1200,
                deleted: true,
                attachment: None,
                fields: RsJsonObject::new(),
            },
            // a was updated
            RemoteSettingsRecord {
                id: "a".into(),
                last_modified: 1100,
                deleted: false,
                attachment: None,
                fields: test_fields("a-with-new-data"),
            },
            // c was not modified, so it's not present in the new response
        ];
        let expected_records = vec![
            // a was updated
            RemoteSettingsRecord {
                id: "a".into(),
                last_modified: 1100,
                deleted: false,
                attachment: None,
                fields: test_fields("a-with-new-data"),
            },
            RemoteSettingsRecord {
                id: "c".into(),
                last_modified: 300,
                deleted: false,
                attachment: None,
                fields: test_fields("c"),
            },
            RemoteSettingsRecord {
                id: "d".into(),
                last_modified: 1300,
                deleted: false,
                attachment: None,
                fields: test_fields("d"),
            },
        ];

        // Set initial records
        storage.set_records(collection_url, &initial_records)?;

        // Verify initial records
        let fetched_records = storage.get_records(collection_url)?.unwrap();
        assert_eq!(fetched_records, initial_records);

        // Update records
        storage.merge_records(collection_url, &updated_records)?;

        // Verify updated records
        let mut fetched_records = storage.get_records(collection_url)?.unwrap();
        fetched_records.sort_by_cached_key(|r| r.id.clone());
        assert_eq!(fetched_records, expected_records);

        // Verify last modified timestamp
        let last_modified = storage.get_last_modified_timestamp(collection_url)?;
        assert_eq!(last_modified, Some(1300));

        Ok(())
    }
}
1
third_party/rust/remote_settings/uniffi.toml
vendored
@@ -13,3 +13,4 @@ from_custom = "{}.toString()"
[bindings.swift]
ffi_module_name = "MozillaRustComponents"
ffi_module_filename = "remote_settingsFFI"
2
third_party/rust/sql-support/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"e86c665d7d352d0cde418dfd50c4247a5d8747a4f9fcb34c8b5b8bdb07b25a85","src/conn_ext.rs":"1280fb1f06b74ed312e73f34c4fd86f538411c4b3d4eeccb631c80d02e295645","src/debug_tools.rs":"bece2bc3d35379b81ea2f942a0a3e909e0ab0553656505904745548eacaf402a","src/each_chunk.rs":"e900a4ebadad31b0a87cb8d7c3ed5aeb7325d4d380ae1d9174eff62c78facdcc","src/lazy.rs":"a96b4f4ec572538b49cdfa8fee981dcf5143a5f51163fb8a573d3ac128df70f9","src/lib.rs":"32c7f50c283e3406d70b0cb787984600843de74bbfb3d94ffc3a4e44af35ecfa","src/maybe_cached.rs":"0b18425595055883a98807fbd62ff27a79c18af34e7cb3439f8c3438463ef2dd","src/open_database.rs":"b0f748ae88739db9e706a1f7f3d5b02769e689df59ff8ef2e894f2b503f80c70","src/repeat.rs":"3dad3cbc6f47fc7598fc7b0fbf79b9c915322396d1f64d3d09651d100d428351"},"package":null}
{"files":{"Cargo.toml":"f952db0361a922f4a3e540ee6338fdca953154af89f8eba1a557e2f5072dc75f","src/conn_ext.rs":"7c4ea787532733772cc840ecab47153d14533279351d9aa16cb5becec8b2345b","src/debug_tools.rs":"bece2bc3d35379b81ea2f942a0a3e909e0ab0553656505904745548eacaf402a","src/each_chunk.rs":"e900a4ebadad31b0a87cb8d7c3ed5aeb7325d4d380ae1d9174eff62c78facdcc","src/lazy.rs":"a96b4f4ec572538b49cdfa8fee981dcf5143a5f51163fb8a573d3ac128df70f9","src/lib.rs":"b2c120db4928c3e4abdd96405fd4c1016255699bdbc38c8cd60dbd3431fc0a12","src/maybe_cached.rs":"0b18425595055883a98807fbd62ff27a79c18af34e7cb3439f8c3438463ef2dd","src/open_database.rs":"b0f748ae88739db9e706a1f7f3d5b02769e689df59ff8ef2e894f2b503f80c70","src/repeat.rs":"b4c5ff5d083afba7f9f153f54aba2e6859b78b85c82d48dbd6bd58f67da9e6b9"},"package":null}
9
third_party/rust/sql-support/Cargo.toml
vendored
@@ -37,6 +37,10 @@ thiserror = "1.0"
[dependencies.interrupt-support]
path = "../interrupt"

[dependencies.prettytable-rs]
version = "0.10"
optional = true

[dependencies.rusqlite]
version = "0.31.0"
features = [

@@ -54,5 +58,8 @@ default-features = false
path = "../rc_crypto/nss/nss_build_common"

[features]
debug-tools = ["rusqlite/column_decltype"]
debug-tools = [
    "dep:prettytable-rs",
    "rusqlite/column_decltype",
]
default = []
10
third_party/rust/sql-support/src/conn_ext.rs
vendored
@@ -252,14 +252,14 @@ impl ConnExt for Connection {
    }
}

impl ConnExt for Transaction<'_> {
impl<'conn> ConnExt for Transaction<'conn> {
    #[inline]
    fn conn(&self) -> &Connection {
        self
    }
}

impl ConnExt for Savepoint<'_> {
impl<'conn> ConnExt for Savepoint<'conn> {
    #[inline]
    fn conn(&self) -> &Connection {
        self

@@ -365,7 +365,7 @@ impl<'conn> UncheckedTransaction<'conn> {
    }
}

impl Deref for UncheckedTransaction<'_> {
impl<'conn> Deref for UncheckedTransaction<'conn> {
    type Target = Connection;

    #[inline]

@@ -374,7 +374,7 @@ impl Deref for UncheckedTransaction<'_> {
    }
}

impl Drop for UncheckedTransaction<'_> {
impl<'conn> Drop for UncheckedTransaction<'conn> {
    fn drop(&mut self) {
        if let Err(e) = self.finish_() {
            log::warn!("Error dropping an unchecked transaction: {}", e);

@@ -382,7 +382,7 @@ impl Drop for UncheckedTransaction<'_> {
    }
}

impl ConnExt for UncheckedTransaction<'_> {
impl<'conn> ConnExt for UncheckedTransaction<'conn> {
    #[inline]
    fn conn(&self) -> &Connection {
        self
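Most of the churn in this file is one mechanical change repeated: the elided impl-lifetime form (Transaction<'_>) is swapped back to the named form (impl<'conn> ... Transaction<'conn>). The two spellings are equivalent; a tiny sketch with a hypothetical trait:

struct Wrapper<'a> {
    value: &'a str,
}

trait Describe {
    fn describe(&self) -> String;
}

// Named-lifetime form, as restored by this backout. Since 'a is never
// referenced anywhere else, it could equally be written as
// `impl Describe for Wrapper<'_>`, the elided form being reverted here.
impl<'a> Describe for Wrapper<'a> {
    fn describe(&self) -> String {
        format!("wrapper around {:?}", self.value)
    }
}

fn main() {
    let w = Wrapper { value: "hi" };
    println!("{}", w.describe());
}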
11
third_party/rust/sql-support/src/lib.rs
vendored
@@ -8,16 +8,7 @@
//! A crate with various sql/sqlcipher helpers.

mod conn_ext;

// XXX - temporarily disable our debug_tools, to avoid pulling in:
// prettytable-rs = { version = "0.10", optional = true }
// while vendoring into m-c :(
pub mod debug_tools {
    pub fn define_debug_functions(_c: &rusqlite::Connection) -> rusqlite::Result<()> {
        Ok(())
    }
}

pub mod debug_tools;
mod each_chunk;
mod lazy;
mod maybe_cached;
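The XXX comment explains the whole hunk: mozilla-central's vendored copy swaps the real debug_tools module for an inline no-op so the optional prettytable-rs dependency never has to be vendored. A sketch of the same keep-the-API, drop-the-dependency pattern with hypothetical names, using a cfg-gated feature in place of hand-editing:

// Real implementation, compiled only when the heavy optional dependency
// is wanted (imagine prettytable-backed formatting here).
#[cfg(feature = "debug-tools")]
pub mod debug_tools {
    pub fn dump(rows: &[(&str, &str)]) {
        for (k, v) in rows {
            println!("{k:20} {v}");
        }
    }
}

// No-op stub with the same signature, so callers compile unchanged when
// the feature (and its dependency) is left out of the build.
#[cfg(not(feature = "debug-tools"))]
pub mod debug_tools {
    pub fn dump(_rows: &[(&str, &str)]) {}
}

fn main() {
    debug_tools::dump(&[("table", "records"), ("rows", "42")]);
}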
2
third_party/rust/sql-support/src/repeat.rs
vendored
@@ -14,7 +14,7 @@ pub struct RepeatDisplay<'a, F> {
    fmt_one: F,
}

impl<F> fmt::Display for RepeatDisplay<'_, F>
impl<'a, F> fmt::Display for RepeatDisplay<'a, F>
where
    F: Fn(usize, &mut fmt::Formatter<'_>) -> fmt::Result,
{
2
third_party/rust/suggest/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"9d916eaffdcc513fb5652c556a01301f0f9fc4816bfcbf0213f0f60005b9a8f0","README.md":"5e28baf874b643d756228bdab345e287bf107d3182dfe6a18aafadcc4b9a3fc9","benches/benchmark_all.rs":"b99f5589316adaabd9e3a7264273da98041643272b99850349be71ff17078a2d","metrics.yaml":"0540ab2271aeab7f07335c7ceec12acde942995f9dcb3c29070489aa61899d56","src/benchmarks/README.md":"ccee8dbddba8762d0453fa855bd6984137b224b8c019f3dd8e86a3c303f51d71","src/benchmarks/client.rs":"a777c0b876a481a21f9d5fbb696b42672ed0b4af359f62f047ac8240d3e35853","src/benchmarks/geoname.rs":"00fab05cf9465cf8e22e143cde75a81885411001b240af00efda4071975d0563","src/benchmarks/ingest.rs":"d4bde332287cc4a8d95fd7852d7563537f30c578e218a01d32ea7a3d50af3811","src/benchmarks/mod.rs":"d4726b1702d602fa765397594f4d0f7c6df27adb950675c8bc52e5cf76a07804","src/benchmarks/query.rs":"d54946063e72cf98e7f46d94665c17c66af637774c2bb50cd5798dbe63d74f3c","src/bin/debug_ingestion_sizes.rs":"ce6e810be7b3fc19e826d75b622b82cfab5a1a99397a6d0833c2c4eebff2d364","src/config.rs":"0ca876e845841bb6429862c0904c82265003f53b55aea053fac60aed278586a7","src/db.rs":"b6d8c9bd425f65e65539858e73a9e4deb2481edcf66e0da309754edf92e6aabc","src/error.rs":"e2ef3ec0e0b2b8ecbb8f2f1717d4cb753af06913b8395d086b7643098ad100a7","src/fakespot.rs":"f501c9fe5296e7c130a9fcb532b861465717652cb5ef688230bc7a3b94df91b1","src/geoname.rs":"811f7b26e547be5557bdefb3867206dd4f30237eaef6b3602bef683db5f44586","src/lib.rs":"2b13d19b0e4f917c3cb6d2ac4bc49b03afef319570923c75a16492c7ceafcf14","src/metrics.rs":"871f0d834efbbc9e26d61f66fa31f0021dcf41444746cd7c082f93ba9628e399","src/pocket.rs":"1316668840ec9b4ea886223921dc9d3b5a1731d1a5206c0b1089f2a6c45c1b7b","src/provider.rs":"f1659693f487c769bd57ba532b367d18db323f96c2b8d3c8ad38a775e45426a8","src/query.rs":"84b97997036a3a597b0574e719e7407ddf0f18bd55c07a704bd2cacd549e8509","src/rs.rs":"953f978b30ca6ebaf18dab5ba8fa02dd076851e83d5f936ea8ab6016e7e17db9","src/schema.rs":"cf58f083e19905c2b07de109398c25304a62b4c571f9729fe4adf48cef90bd24","src/store.rs":"d9d99cabbe8d3aaf5a25c72d9006b3b864a745085e23d246e52389784378f239","src/suggestion.rs":"cf4b457d7499dc8dabedbc14536fe915969378a25cc45ca9f25139843558b68d","src/testing/client.rs":"f8c9bd32d0f4cf364daebe114d580c7e36a83b69c07884d14170969620d9a437","src/testing/data.rs":"d4fc5227996a8b115d93243fdbd83bc57d73a8c2d4c0b20dffa15bbec27925cb","src/testing/mod.rs":"4d2781c77ed9ace9d80d6d00c63a06bf28a4156f223616fffe3c07e64a8041db","src/util.rs":"52c6ec405637afa2d1a89f29fbbb7dcc341546b6deb97d326c4490bbf8713cb0","src/weather.rs":"8e8958a5a16f09f7d33efc6036d4ba980a2eea53c2d16bcbb37debebde28ef61","src/yelp.rs":"0b9dfa698d9c3162d47c0103d1799838d444345f9d7f943eedc6bcc98fd8b57d","uniffi.toml":"8205e4679ac26d53e70af0f85c013fd27cda1119f4322aebf5f2b9403d45a611"},"package":null}
{"files":{"Cargo.toml":"9d916eaffdcc513fb5652c556a01301f0f9fc4816bfcbf0213f0f60005b9a8f0","README.md":"5e28baf874b643d756228bdab345e287bf107d3182dfe6a18aafadcc4b9a3fc9","benches/benchmark_all.rs":"744ae33fe4ebe4ee5187987dd72d028f5d0cac8c04765ba186d5598e14982049","metrics.yaml":"0540ab2271aeab7f07335c7ceec12acde942995f9dcb3c29070489aa61899d56","src/benchmarks/README.md":"ccee8dbddba8762d0453fa855bd6984137b224b8c019f3dd8e86a3c303f51d71","src/benchmarks/client.rs":"a777c0b876a481a21f9d5fbb696b42672ed0b4af359f62f047ac8240d3e35853","src/benchmarks/geoname.rs":"00fab05cf9465cf8e22e143cde75a81885411001b240af00efda4071975d0563","src/benchmarks/ingest.rs":"d4bde332287cc4a8d95fd7852d7563537f30c578e218a01d32ea7a3d50af3811","src/benchmarks/mod.rs":"d4726b1702d602fa765397594f4d0f7c6df27adb950675c8bc52e5cf76a07804","src/benchmarks/query.rs":"d54946063e72cf98e7f46d94665c17c66af637774c2bb50cd5798dbe63d74f3c","src/bin/debug_ingestion_sizes.rs":"ce6e810be7b3fc19e826d75b622b82cfab5a1a99397a6d0833c2c4eebff2d364","src/config.rs":"0ca876e845841bb6429862c0904c82265003f53b55aea053fac60aed278586a7","src/db.rs":"b6d8c9bd425f65e65539858e73a9e4deb2481edcf66e0da309754edf92e6aabc","src/error.rs":"e2ef3ec0e0b2b8ecbb8f2f1717d4cb753af06913b8395d086b7643098ad100a7","src/fakespot.rs":"03d3aac07b3a3a9ceb8d2c452d4a122bfebf04579829e62e83487877055312d4","src/geoname.rs":"811f7b26e547be5557bdefb3867206dd4f30237eaef6b3602bef683db5f44586","src/lib.rs":"2b13d19b0e4f917c3cb6d2ac4bc49b03afef319570923c75a16492c7ceafcf14","src/metrics.rs":"871f0d834efbbc9e26d61f66fa31f0021dcf41444746cd7c082f93ba9628e399","src/pocket.rs":"1316668840ec9b4ea886223921dc9d3b5a1731d1a5206c0b1089f2a6c45c1b7b","src/provider.rs":"f1659693f487c769bd57ba532b367d18db323f96c2b8d3c8ad38a775e45426a8","src/query.rs":"84b97997036a3a597b0574e719e7407ddf0f18bd55c07a704bd2cacd549e8509","src/rs.rs":"953f978b30ca6ebaf18dab5ba8fa02dd076851e83d5f936ea8ab6016e7e17db9","src/schema.rs":"cf58f083e19905c2b07de109398c25304a62b4c571f9729fe4adf48cef90bd24","src/store.rs":"df897e7762ef3db41223a078d6f245a75445f570e3a3b95d7be701836719f353","src/suggestion.rs":"cf4b457d7499dc8dabedbc14536fe915969378a25cc45ca9f25139843558b68d","src/testing/client.rs":"f8c9bd32d0f4cf364daebe114d580c7e36a83b69c07884d14170969620d9a437","src/testing/data.rs":"d4fc5227996a8b115d93243fdbd83bc57d73a8c2d4c0b20dffa15bbec27925cb","src/testing/mod.rs":"4d2781c77ed9ace9d80d6d00c63a06bf28a4156f223616fffe3c07e64a8041db","src/util.rs":"52c6ec405637afa2d1a89f29fbbb7dcc341546b6deb97d326c4490bbf8713cb0","src/weather.rs":"8e8958a5a16f09f7d33efc6036d4ba980a2eea53c2d16bcbb37debebde28ef61","src/yelp.rs":"bc036ff71b438d53ce8811acd8d650d83ef03faeea476f5b659b403c1e64ff2b","uniffi.toml":"19ea9cfd30d2e57ffad125b7eeef7f9228d43347fceb8bb9a54a0e66177eb2e5"},"package":null}
4
third_party/rust/suggest/benches/benchmark_all.rs
vendored
@@ -1,7 +1,3 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use criterion::{
    criterion_group, criterion_main, measurement::Measurement, BatchSize, BenchmarkGroup, Criterion,
};
2
third_party/rust/suggest/src/fakespot.rs
vendored
@@ -3,7 +3,7 @@
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/// Fakespot-specific logic
///

/// Score used to order Fakespot suggestions
///
/// FakespotScore contains several components, each in the range of [0, 1]
8
third_party/rust/suggest/src/store.rs
vendored
@@ -278,11 +278,9 @@ impl SuggestStore {
    /// since city and region names are not unique. `filter` is disjunctive: If
    /// any item in `filter` matches a geoname, the geoname will be filtered in.
    ///
    /// The query can match a single geoname in more than one way. For example,
    /// it can match both a full name and an abbreviation. The returned vec of
    /// [`GeonameMatch`] values will include all matches for a geoname, one
    /// match per `match_type` per geoname. In other words, a matched geoname
    /// can map to more than one `GeonameMatch`.
    /// The query can match a geoname in more than one way, for example both a
    /// full name and an abbreviation. The returned vec of [`GeonameMatch`]
    /// values will include all matches for a geoname, one match per geoname.
    #[handle_error(Error)]
    pub fn fetch_geonames(
        &self,
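The doc text being removed here pins down a subtle contract: results are deduped per (geoname, match_type) pair, so one geoname can still yield several matches. A sketch of that dedup rule with hypothetical simplified types (the real GeonameMatch carries more data):

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum MatchType {
    Name,
    Abbreviation,
}

#[derive(Debug)]
struct GeonameMatch {
    geoname_id: i64,
    match_type: MatchType,
}

// Keep at most one match per (geoname, match_type) pair; a geoname that
// matched both by full name and by abbreviation survives twice.
fn dedupe(candidates: Vec<GeonameMatch>) -> Vec<GeonameMatch> {
    let mut seen = HashSet::new();
    candidates
        .into_iter()
        .filter(|m| seen.insert((m.geoname_id, m.match_type)))
        .collect()
}

fn main() {
    let matches = vec![
        GeonameMatch { geoname_id: 1, match_type: MatchType::Name },
        GeonameMatch { geoname_id: 1, match_type: MatchType::Name },
        GeonameMatch { geoname_id: 1, match_type: MatchType::Abbreviation },
    ];
    assert_eq!(dedupe(matches).len(), 2);
}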
2
third_party/rust/suggest/src/yelp.rs
vendored
@@ -50,7 +50,7 @@ const MAX_MODIFIER_WORDS_NUMBER: usize = 2;
/// At least this many characters must be typed for a subject to be matched.
const SUBJECT_PREFIX_MATCH_THRESHOLD: usize = 2;

impl SuggestDao<'_> {
impl<'a> SuggestDao<'a> {
    /// Inserts the suggestions for Yelp attachment into the database.
    pub(crate) fn insert_yelp_suggestions(
        &mut self,
2
third_party/rust/suggest/uniffi.toml
vendored
@@ -1,6 +1,6 @@
[bindings.kotlin]
package_name = "mozilla.appservices.suggest"

[bindings.swift]
ffi_module_name = "MozillaRustComponents"
ffi_module_filename = "suggestFFI"
2
third_party/rust/sync-guid/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"1a41a746401ba02eb4272c9435ac8048d8e503bc912372a916444fa8b57ac204","src/lib.rs":"a112b66270feba587d0b09e64b4197af01f981675a23f76649a7d948f85c2bd9","src/rusqlite_support.rs":"c6791f103c286858a1a6e2c7e106b177ed8d9196b73ed100a8bb0aec1b1f957f","src/serde_support.rs":"0aade33dae88373d250ad921295d8dfe344e655f7c6240e3491d11ffc774443f"},"package":null}
{"files":{"Cargo.toml":"1a41a746401ba02eb4272c9435ac8048d8e503bc912372a916444fa8b57ac204","src/lib.rs":"a112b66270feba587d0b09e64b4197af01f981675a23f76649a7d948f85c2bd9","src/rusqlite_support.rs":"c6791f103c286858a1a6e2c7e106b177ed8d9196b73ed100a8bb0aec1b1f957f","src/serde_support.rs":"99668580adb3c28ee7d3ae00ad4cf52e297aec3eeeaed11b200fa8c7e17319f1"},"package":null}
2
third_party/rust/sync-guid/src/serde_support.rs
vendored
@@ -12,7 +12,7 @@ use serde::{
use crate::Guid;

struct GuidVisitor;
impl Visitor<'_> for GuidVisitor {
impl<'de> Visitor<'de> for GuidVisitor {
    type Value = Guid;
    #[inline]
    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
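The same lifetime churn reaches serde here, where 'de is part of the Visitor trait itself. For context, a minimal self-contained visitor analogous to GuidVisitor, written against a hypothetical newtype rather than sync-guid's Guid:

use std::fmt;

use serde::de::{self, Deserialize, Deserializer, Visitor};

// Hypothetical newtype standing in for the crate's Guid.
#[derive(Debug)]
struct Id(String);

struct IdVisitor;

impl<'de> Visitor<'de> for IdVisitor {
    type Value = Id;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("a string id")
    }

    fn visit_str<E: de::Error>(self, s: &str) -> Result<Id, E> {
        Ok(Id(s.to_string()))
    }
}

impl<'de> Deserialize<'de> for Id {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Id, D::Error> {
        deserializer.deserialize_str(IdVisitor)
    }
}

fn main() {
    let id: Id = serde_json::from_str("\"abc123\"").unwrap();
    println!("{id:?}");
}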
2
third_party/rust/sync15/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"ba77b37be3e670a02edd70f2d01e5088eb79503efb40835c7dc1d7ca12c98809","README.md":"6d4ff5b079ac5340d18fa127f583e7ad793c5a2328b8ecd12c3fc723939804f2","build.rs":"aa971160d67ce8626b26e15c04c34b730f594c45c817aae34cfc9f3ea14ae284","src/bso/content.rs":"92935258745bdf0c3915a555cb6884a7fa69faa1290ec2c1815f6e2f3c0f0562","src/bso/crypto.rs":"27602dcccb37d3a55620ee4e16b705da455d49af575de115c7c79c0178eb1d6d","src/bso/mod.rs":"1431db19f3187fad1e4736146e0d2f24826f08b10f8623c0550b4e5a9c86328d","src/bso/test_utils.rs":"4ec5a2df5e1c0ec14dc770681e959bdcef6ef04f6fde435999197f46a8ae4831","src/client/coll_state.rs":"13e6ef55273baf5536acc369be522e34a803a32cabf19cce43e426aea9b6223e","src/client/coll_update.rs":"dac04a90c29dd969f8b4250414609c9b6d61daf2dfa4ae77d1c4a165ba970b05","src/client/collection_keys.rs":"c27b2277a3a52033b58ab01490fc2ea7007494195dd5e6dc2c6931a4ca96795a","src/client/mod.rs":"8f588d4a035cf79d96f2500f06d5651c1a7c566127c456ffa5429811ddce3fd6","src/client/request.rs":"e878c5b43298b6eb682748474963f9fb8d053b4dc690bbb27107f5fa0ee74e01","src/client/state.rs":"4e31193ef2471c1dfabf1c6a391bcb95e14ddb45855786a4194ff187d5c9347c","src/client/status.rs":"f445a8765dac9789444e23b5145148413407bb1d18a15ef56682243997f591bf","src/client/storage_client.rs":"edfb44538b8f8ccb3a38d8942105dde33a6c9cef8abb0c3bdcd91b55bbdda059","src/client/sync.rs":"b29abb512ec9d163f7883b71f78c9202802dcb17cad1fc5dc08087fb0bb66704","src/client/sync_multiple.rs":"67a0e6b9049e5b1b1b248febe392b53eb54bb77e9ddddfba62da975389adf3aa","src/client/token.rs":"13729c693c8be72bcafc816c97e2a35932d008b4f2ccda6a5f8cdb8b2c99a293","src/client/util.rs":"71cc70ee41f821f53078675e636e9fad9c6046fa1a989e37f5487e340a2277d6","src/client_types.rs":"3c3cac1540b92482f43660d9e43bdde8481c4cc1a98253a68c80e791231f5976","src/clients_engine/engine.rs":"31c0b6934152f3921af83dadf5d2b22205f49a501427cd736c62f782595cece3","src/clients_engine/mod.rs":"461729e6f89b66b2cbd89b041a03d4d6a8ba582284ed4f3015cb13e1a0c6da97","src/clients_engine/record.rs":"b0d84bf420743d7638a45e4836633a45e50257d5548fe7ecd04bff4d724439b8","src/clients_engine/ser.rs":"be6a19c45eb8002ff8e7cf746d2f97d9cecd1740f9817a8f1d624825475fd777","src/device_type.rs":"dc2d4296d25e31471c8e68488f1043ff239b902036cd6aea8a686cf79b4ed335","src/enc_payload.rs":"aa3eea7df49b24cd59831680a47c417b73a3e36e6b0f3f4baf14ca66bd68be6b","src/engine/bridged_engine.rs":"b4e3071a0259ac55303364e57f9cd685916b80dc302030bba07790e55ceecb66","src/engine/mod.rs":"d0d031d80fbdd90686c443b8c44720ab2ab0aff2c1106e0fdd7d60c46361fe8b","src/engine/request.rs":"5923025fb9550178339f880a1bf8526d8e853e7a0b2bce6d9d687cc808ac0085","src/engine/sync_engine.rs":"531b35d72ce9e04c3e543c0468c1e450fba2c0dc3d33d68d9b1c0a5c1ad7dd34","src/error.rs":"a45cfe02e6301f473c34678b694943c1a04308b8c292c6e0448bf495194c3b5e","src/key_bundle.rs":"abd0781f3be8c8e7c691f18bb71f3433b633803c48da9794e15ac6301ed60d6c","src/lib.rs":"f59f8817978d943518dfa03ab31fc0f6b1fc72ee9943a97aef1537e2769649f5","src/record_types.rs":"02bb3d352fb808131d298f9b90d9c95b7e9e0138b97c5401f3b9fdacc5562f44","src/server_timestamp.rs":"63916817796e83fe31fbd598bac025dfa71ec9e1808d09073db258c78a3331cd","src/sync15.udl":"464047a67a7877bc671f9f3aca13f3039cf34beb51756bcdb86015d789a8f400","src/telemetry.rs":"f332b3849824db6b131a7c2dfe20f56075c6a66ad72f6697bc283d914126b423","uniffi.toml":"d9a5a5cb0eee5218f5eee4d8d89214cc1d7fb5b49323fd17becdf4adb706a6aa"},"package":null}
{"files":{"Cargo.toml":"29deda14f13e81c76d4a142467d2ae7f356a64f5eeaa4aeedd602e8d57e8f6a7","README.md":"6d4ff5b079ac5340d18fa127f583e7ad793c5a2328b8ecd12c3fc723939804f2","build.rs":"aa971160d67ce8626b26e15c04c34b730f594c45c817aae34cfc9f3ea14ae284","src/bso/content.rs":"92935258745bdf0c3915a555cb6884a7fa69faa1290ec2c1815f6e2f3c0f0562","src/bso/crypto.rs":"27602dcccb37d3a55620ee4e16b705da455d49af575de115c7c79c0178eb1d6d","src/bso/mod.rs":"09e723dc7e99295ecafdcadffaf604d66ea27cf2b7f1fd9ab3cac4f4698ff6a7","src/bso/test_utils.rs":"4ec5a2df5e1c0ec14dc770681e959bdcef6ef04f6fde435999197f46a8ae4831","src/client/coll_state.rs":"13e6ef55273baf5536acc369be522e34a803a32cabf19cce43e426aea9b6223e","src/client/coll_update.rs":"dac04a90c29dd969f8b4250414609c9b6d61daf2dfa4ae77d1c4a165ba970b05","src/client/collection_keys.rs":"c27b2277a3a52033b58ab01490fc2ea7007494195dd5e6dc2c6931a4ca96795a","src/client/mod.rs":"8f588d4a035cf79d96f2500f06d5651c1a7c566127c456ffa5429811ddce3fd6","src/client/request.rs":"e878c5b43298b6eb682748474963f9fb8d053b4dc690bbb27107f5fa0ee74e01","src/client/state.rs":"4e31193ef2471c1dfabf1c6a391bcb95e14ddb45855786a4194ff187d5c9347c","src/client/status.rs":"f445a8765dac9789444e23b5145148413407bb1d18a15ef56682243997f591bf","src/client/storage_client.rs":"cc4a3219f342f8665399734902f68a2ddf12ed7e3726033ed10084bcefb66ffd","src/client/sync.rs":"b29abb512ec9d163f7883b71f78c9202802dcb17cad1fc5dc08087fb0bb66704","src/client/sync_multiple.rs":"6e92571132f89744b553190c596be8aff9b2d031d8f79d82c94cdf78b1683f4a","src/client/token.rs":"13729c693c8be72bcafc816c97e2a35932d008b4f2ccda6a5f8cdb8b2c99a293","src/client/util.rs":"71cc70ee41f821f53078675e636e9fad9c6046fa1a989e37f5487e340a2277d6","src/client_types.rs":"3c3cac1540b92482f43660d9e43bdde8481c4cc1a98253a68c80e791231f5976","src/clients_engine/engine.rs":"9e11b47be81fc63214f31879af74075674aa50a8f8989afe20fefa7990fa99b9","src/clients_engine/mod.rs":"461729e6f89b66b2cbd89b041a03d4d6a8ba582284ed4f3015cb13e1a0c6da97","src/clients_engine/record.rs":"b0d84bf420743d7638a45e4836633a45e50257d5548fe7ecd04bff4d724439b8","src/clients_engine/ser.rs":"be6a19c45eb8002ff8e7cf746d2f97d9cecd1740f9817a8f1d624825475fd777","src/device_type.rs":"dc2d4296d25e31471c8e68488f1043ff239b902036cd6aea8a686cf79b4ed335","src/enc_payload.rs":"aa3eea7df49b24cd59831680a47c417b73a3e36e6b0f3f4baf14ca66bd68be6b","src/engine/bridged_engine.rs":"b4e3071a0259ac55303364e57f9cd685916b80dc302030bba07790e55ceecb66","src/engine/mod.rs":"90f1f9760f5f712a337aebb04e59c736e4b6fbd89d6a188d969210c7f3f321ae","src/engine/request.rs":"5923025fb9550178339f880a1bf8526d8e853e7a0b2bce6d9d687cc808ac0085","src/engine/sync_engine.rs":"531b35d72ce9e04c3e543c0468c1e450fba2c0dc3d33d68d9b1c0a5c1ad7dd34","src/error.rs":"a45cfe02e6301f473c34678b694943c1a04308b8c292c6e0448bf495194c3b5e","src/key_bundle.rs":"abd0781f3be8c8e7c691f18bb71f3433b633803c48da9794e15ac6301ed60d6c","src/lib.rs":"f59f8817978d943518dfa03ab31fc0f6b1fc72ee9943a97aef1537e2769649f5","src/record_types.rs":"02bb3d352fb808131d298f9b90d9c95b7e9e0138b97c5401f3b9fdacc5562f44","src/server_timestamp.rs":"63916817796e83fe31fbd598bac025dfa71ec9e1808d09073db258c78a3331cd","src/sync15.udl":"464047a67a7877bc671f9f3aca13f3039cf34beb51756bcdb86015d789a8f400","src/telemetry.rs":"bd26ce867be1c4e7ff616289043d50ded94a5a22351c5a32a2cbd5d6c008a79e","uniffi.toml":"d9a5a5cb0eee5218f5eee4d8d89214cc1d7fb5b49323fd17becdf4adb706a6aa"},"package":null}
5
third_party/rust/sync15/Cargo.toml
vendored
@@ -74,7 +74,7 @@ features = ["random"]
version = "0.28.2"

[dependencies.url]
version = "2"
version = "2.1"
optional = true

[dependencies.viaduct]

@@ -105,6 +105,3 @@ sync-client = [
    "url",
]
sync-engine = ["random-guid"]

[lints.clippy]
empty-line-after-doc-comments = "allow"
16
third_party/rust/sync15/src/bso/mod.rs
vendored
@@ -9,15 +9,15 @@
/// the server timestamp of the resource, etc) and a field called `payload`.
/// A bso is serialized to and from JSON.
/// * There's a "cleartext" bso:
///   * The payload is a String, which itself is JSON encoded (ie, this string `payload` is
///     always double JSON encoded in a server record)
///   * This supplies helper methods for working with the "content" (some arbitrary <T>) in the
///     payload.
/// ** The payload is a String, which itself is JSON encoded (ie, this string `payload` is
/// always double JSON encoded in a server record)
/// ** This supplies helper methods for working with the "content" (some arbitrary <T>) in the
/// payload.
/// * There's an "encrypted" bso
///   * The payload is an [crate::enc_payload::EncryptedPayload]
///   * Only clients use this; as soon as practical we decrypt and as late as practical we encrypt
///     to and from encrypted bsos.
///   * The encrypted bsos etc are all in the [crypto] module and require the `crypto` feature.
/// ** The payload is an [crate::enc_payload::EncryptedPayload]
/// ** Only clients use this; as soon as practical we decrypt and as late as practical we encrypt
/// to and from encrypted bsos.
/// ** The encrypted bsos etc are all in the [crypto] module and require the `crypto` feature.
///
/// Let's look at some real-world examples:
/// # meta/global
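The "double JSON encoded" phrase above is the key detail: the server record is JSON, and its payload field is itself a JSON document serialized into a string. A small sketch of that encode/decode dance with a hypothetical content type (not the real IncomingBso/OutgoingBso API):

use serde::{Deserialize, Serialize};
use serde_json::json;

// Hypothetical content type carried inside a cleartext BSO payload.
#[derive(Debug, Serialize, Deserialize)]
struct TabsRecord {
    id: String,
    urls: Vec<String>,
}

fn main() -> serde_json::Result<()> {
    let content = TabsRecord {
        id: "tabs".into(),
        urls: vec!["https://example.com".into()],
    };

    // Encoding: the content becomes a JSON string, and that string is the
    // `payload` field of the outer record, hence JSON encoded twice.
    let envelope = json!({
        "id": "tabs",
        "payload": serde_json::to_string(&content)?,
    });
    let on_the_wire = serde_json::to_string(&envelope)?;

    // Decoding: parse the outer record, then parse the payload string again.
    let outer: serde_json::Value = serde_json::from_str(&on_the_wire)?;
    let inner: TabsRecord = serde_json::from_str(outer["payload"].as_str().unwrap())?;
    println!("{inner:?}");
    Ok(())
}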
@@ -426,7 +426,7 @@ pub struct PostWrapper<'a> {
    coll: &'a CollectionName,
}

impl BatchPoster for PostWrapper<'_> {
impl<'a> BatchPoster for PostWrapper<'a> {
    fn post<T, O>(
        &self,
        bytes: Vec<u8>,

@@ -199,7 +199,7 @@ struct SyncMultipleDriver<'info, 'res, 'pgs, 'mcs> {
    saw_auth_error: bool,
}

impl SyncMultipleDriver<'_, '_, '_, '_> {
impl<'info, 'res, 'pgs, 'mcs> SyncMultipleDriver<'info, 'res, 'pgs, 'mcs> {
    /// The actual worker for sync_multiple.
    fn sync(mut self) -> result::Result<(), Error> {
        log::info!("Loading/initializing persisted state");

@@ -239,7 +239,7 @@ pub struct Engine<'a> {
    pub recent_clients: HashMap<String, RemoteClient>,
}

impl Engine<'_> {
impl<'a> Engine<'a> {
    /// Creates a new clients engine that delegates to the given command
    /// processor to apply incoming commands.
    pub fn new<'b>(
2
third_party/rust/sync15/src/engine/mod.rs
vendored
@@ -19,11 +19,9 @@
//! * Code which implements the [crate::engine::bridged_engine::BridgedEngine]
//! trait. These engines are a "bridge" between the Desktop JS Sync world and
//! this rust code.
//!
//! While these engines end up doing the same thing, the difference is due to
//! implementation differences between the Desktop Sync client and the Rust
//! client.
//!
//! We intend merging these engines - the first step will be to merge the
//! types and payload management used by these traits, then to combine the
//! requirements into a single trait that captures both use-cases.
2
third_party/rust/sync15/src/telemetry.rs
vendored
@@ -814,7 +814,7 @@ mod ping_tests {
    }
}

impl From<&Error> for SyncFailure {
impl<'a> From<&'a Error> for SyncFailure {
    fn from(e: &Error) -> SyncFailure {
        match e {
            #[cfg(feature = "sync-client")]
2
third_party/rust/tabs/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"5630e526d31d5ce38d812492c4c273a2b56fa993796b69c039f5043822855e0e","README.md":"c48b8f391ef822c4f3971b5f453a1e7b43bea232752d520460d2f04803aead1a","build.rs":"33e61b811b19ed2b58e319cc65d5988bed258d2c4fea2d706301184c59847a0f","src/error.rs":"6e5fd48a3f228d37977881a3657f8635b1b37e3b16d91ac2d8476174172a2a74","src/lib.rs":"5789fc7107c76168c331c175aff4f0b2ac2ba3d65cfa0df0e1d4f8ef0c6eb80c","src/schema.rs":"510218d465c7d26d6b9f342cc33c14ab83044a67561ef924c33dadb060761972","src/storage.rs":"d7ea62359a63b1738100568649cb00ebb035f5044d8b4c0fe5f3803e6f34cc30","src/store.rs":"30d854aa7ad1ee3a3cac683a1ae0b9fb3833c8d90537beafcd3e4b24f6e7c6e8","src/sync/bridge.rs":"18d3a7913a030b598d4b6cbd5b7e2ab4cef4cc7ea964f5bc84d7fb2f28787529","src/sync/engine.rs":"73007423f2a22314a034ac660aa65bd9c50e8aa850c445a66604486280067843","src/sync/mod.rs":"09ba3c87f1174a243bf5aaa481effd18929d54359ceb9b23ccb2c32ee3482f34","src/sync/record.rs":"eef6751c209d039958afbe245ddb006cfdf6b8b6b47f925f69c552b832b87922","src/tabs.udl":"99322a1d49d82fb436a279431f407f5b09c9d277e238a7d6fb270a744ec466af","uniffi.toml":"70a41bac1bbbde7a571f1b023f22636337ca3bffd6891dd67596fe13ab98b2f6"},"package":null}
{"files":{"Cargo.toml":"b845e9e2b1abf126dede1a4ef607d2159aeb03e7720ca85876e5afdf7cc04d8b","README.md":"c48b8f391ef822c4f3971b5f453a1e7b43bea232752d520460d2f04803aead1a","build.rs":"33e61b811b19ed2b58e319cc65d5988bed258d2c4fea2d706301184c59847a0f","src/error.rs":"c38755d7db591c95ee5f0f14d8e8fbea4e8cfc63cf979cd9a1fed2b13e275a6a","src/lib.rs":"5789fc7107c76168c331c175aff4f0b2ac2ba3d65cfa0df0e1d4f8ef0c6eb80c","src/schema.rs":"510218d465c7d26d6b9f342cc33c14ab83044a67561ef924c33dadb060761972","src/storage.rs":"d7ea62359a63b1738100568649cb00ebb035f5044d8b4c0fe5f3803e6f34cc30","src/store.rs":"30d854aa7ad1ee3a3cac683a1ae0b9fb3833c8d90537beafcd3e4b24f6e7c6e8","src/sync/bridge.rs":"18d3a7913a030b598d4b6cbd5b7e2ab4cef4cc7ea964f5bc84d7fb2f28787529","src/sync/engine.rs":"73007423f2a22314a034ac660aa65bd9c50e8aa850c445a66604486280067843","src/sync/mod.rs":"09ba3c87f1174a243bf5aaa481effd18929d54359ceb9b23ccb2c32ee3482f34","src/sync/record.rs":"eef6751c209d039958afbe245ddb006cfdf6b8b6b47f925f69c552b832b87922","src/tabs.udl":"99322a1d49d82fb436a279431f407f5b09c9d277e238a7d6fb270a744ec466af","uniffi.toml":"70a41bac1bbbde7a571f1b023f22636337ca3bffd6891dd67596fe13ab98b2f6"},"package":null}
5
third_party/rust/tabs/Cargo.toml
vendored
@@ -39,7 +39,7 @@ serde = "1"
serde_derive = "1"
serde_json = "1"
thiserror = "1.0"
url = "2"
url = "2.1"

[dependencies.error-support]
path = "../support/error"

@@ -85,6 +85,3 @@ default-features = false
[build-dependencies.uniffi]
version = "0.28.2"
features = ["build"]

[lints.clippy]
empty-line-after-doc-comments = "allow"
10
third_party/rust/tabs/src/error.rs
vendored
@@ -25,8 +25,14 @@ pub enum TabsApiError {
// Error we use internally
#[derive(Debug, thiserror::Error)]
pub enum Error {
    // For historical reasons we have a mis-matched name between the error
    // and what the error actually represents.
    #[cfg(feature = "full-sync")]
    #[error("Error synchronizing: {0}")]
    SyncAdapterError(#[from] sync15::Error),

    // Note we are abusing this as a kind of "mis-matched feature" error.
    // This works because when `full-sync` isn't enabled we don't actually
    // handle any sync15 errors as the bridged-engine never returns them.
    #[cfg(not(feature = "full-sync"))]
    #[error("Sync feature is disabled: {0}")]
    SyncAdapterError(String),
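Note how the same variant name, SyncAdapterError, carries a different payload depending on the full-sync feature, so downstream match arms stay stable. A compilable sketch of the pattern with hypothetical names (assuming a default build where the feature is off):

#[derive(Debug, thiserror::Error)]
pub enum Error {
    // With the feature on, the variant wraps the real upstream error...
    #[cfg(feature = "net")]
    #[error("network error: {0}")]
    Network(#[from] std::io::Error),

    // ...with it off, a String placeholder keeps the variant (and its
    // error messages) available without pulling in the dependency.
    #[cfg(not(feature = "net"))]
    #[error("network support disabled: {0}")]
    Network(String),
}

fn main() {
    // Compiles in the default (feature-off) configuration.
    let e = Error::Network("the \"net\" feature is not enabled".to_string());
    println!("{e}");
}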
2
third_party/rust/viaduct/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"0f74fac18c6b7c0c3b6e16fe6e25f6b4a068fe8aa6e617c2b5ba3294cac1de87","README.md":"7507842687c0a9f7146318fe1541183a2fdca65ec86aafb12207c994012ab15a","src/backend.rs":"22c313dd0ecbe92803219d3770bb97b3f876ed2fdc4ac8b5ac8dbea92b563e9f","src/backend/ffi.rs":"a1ccc25c3f52cc94718624d39c082c9c7e34082804bb12f0b96f5d3a064e0c54","src/error.rs":"98ca92b58bd8b4f3c9d4c6d03ed235609d486fe8121277004283b9cfda6e3260","src/fetch_msg_types.proto":"de8a46a4947a140783a4d714364f18ccf02c4759d6ab5ace9da0b1c058efa6c3","src/headers.rs":"bf3cd6b717dfb337c64ce0bc6d275364181884378fc47afed7c80c435ce0733f","src/headers/name.rs":"dcfd4d42326724f822893cf6ac90f1e14734dba178150dcb606f4b19de5e66d7","src/lib.rs":"abddea31021b5743e4cc6d20c0bd89dc59b248a15405bf9717c79ed732950a35","src/mozilla.appservices.httpconfig.protobuf.rs":"9ede762489a0c07bc08a5b852b33013a410cb41b44b92a44555f85bb2db91412","src/settings.rs":"f62d0779d7b86af5daad0c23fb61a5982c11520e6fa528ebe2e2d6ad76e70afd"},"package":null}
{"files":{"Cargo.toml":"5fb7a944f97ec8986fae73edbfd948d1d4ade7a8153aa2233c5f318823a7c753","README.md":"7507842687c0a9f7146318fe1541183a2fdca65ec86aafb12207c994012ab15a","src/backend.rs":"22c313dd0ecbe92803219d3770bb97b3f876ed2fdc4ac8b5ac8dbea92b563e9f","src/backend/ffi.rs":"a1ccc25c3f52cc94718624d39c082c9c7e34082804bb12f0b96f5d3a064e0c54","src/error.rs":"98ca92b58bd8b4f3c9d4c6d03ed235609d486fe8121277004283b9cfda6e3260","src/fetch_msg_types.proto":"de8a46a4947a140783a4d714364f18ccf02c4759d6ab5ace9da0b1c058efa6c3","src/headers.rs":"bf3cd6b717dfb337c64ce0bc6d275364181884378fc47afed7c80c435ce0733f","src/headers/name.rs":"dcfd4d42326724f822893cf6ac90f1e14734dba178150dcb606f4b19de5e66d7","src/lib.rs":"abddea31021b5743e4cc6d20c0bd89dc59b248a15405bf9717c79ed732950a35","src/mozilla.appservices.httpconfig.protobuf.rs":"9ede762489a0c07bc08a5b852b33013a410cb41b44b92a44555f85bb2db91412","src/settings.rs":"f62d0779d7b86af5daad0c23fb61a5982c11520e6fa528ebe2e2d6ad76e70afd"},"package":null}
2
third_party/rust/viaduct/Cargo.toml
vendored
@@ -40,7 +40,7 @@ prost = "0.12"
serde = "1"
serde_json = "1"
thiserror = "1.0"
url = "2"
url = "2.1"

[dependencies.parking_lot]
version = ">=0.11,<=0.12"
2
third_party/rust/webext-storage/.cargo-checksum.json
vendored
@@ -1 +1 @@
{"files":{"Cargo.toml":"8a987d66d7550cf08b0c86c212a90cf6424e2f930e5d916bb2291cfdefaf6c59","README.md":"821cac7eb5b963fc3f3fe21dd890427ab2bbf335cb25cbae89b713b3350687c5","build.rs":"f4ff15cd54890d3e3636e77a0458ba9a8882f271ccb0056a0bbae1975cdd75d5","sql/create_schema.sql":"a17311a407ec10e033886b7125da4c8b84bc6d761f6b28edc9594de430e1d964","sql/create_sync_temp_tables.sql":"860ede362c94feb47d85522553fa2852f9bdb9f9b025d6438dd5dee3d4acd527","sql/tests/create_schema_v1.sql":"77cf0c90eaac3e1aea626537147e1b8ec349b68d6076c92fa7ae402aac613050","src/api.rs":"6a3c3e09797df06165091a3d66e3830febb63bd901250f89f7d6886ba0f408cd","src/db.rs":"9676325b259faadecfc28180d43f8b0a640a1204c2783fc626137eee62a9089b","src/error.rs":"10d99e3dc6a38951456c0fac7e46fb4f441e976b47fdddea257badbc66b8702d","src/ffi.rs":"f66a81393bebe7a4b7e7960cb426df106ff1f02bfebcaa6e335b4b8b56c5c936","src/lib.rs":"259fbbfe5b60fc1e43ef4cfe08fb131d6c1c42c49fee74a3b687e00ac91c361a","src/migration.rs":"a87a9628f31a4b5a1d1610f80fb37ddb8f94c28a9455980bd5153198d1d7aa2a","src/schema.rs":"d90556dcfa5784a2d3e9432968b6eed60630e3bff5978d73abd8da2b4c71ea63","src/store.rs":"a898492e4a53472414e114ad275bf6a750313a228e589814d73c803f6ce59288","src/sync/bridge.rs":"14d095bc67e511297b833e279912f61dd67993a877be941cc058afe9017cb058","src/sync/incoming.rs":"5899c880810968e1e99f4b2de5ea1eb9d0886643eea85c851571e4ac6f2eca08","src/sync/mod.rs":"75091c8f3138fa5585352b5ea7e5967088e76b1857d24cc02d3e142c0ee9e89d","src/sync/outgoing.rs":"e1577ceef9d1d87c0a0985cad16bc13938dc2144dd0befeb7aca2c97375e7ee7","src/sync/sync_tests.rs":"f999b9acfb95604e4b3b981a29527a490b64e39026ea9d9668418912cda54536","src/webext-storage.udl":"a37a5c796bcdc71b61dbb189a8801bbff9e67bf81aebb489db0e7fcafc58521d","uniffi.toml":"beeec89c2f877eb89be0090dc304dbc7c74e787385e7459bad78c6165bb66791"},"package":null}
{"files":{"Cargo.toml":"b14f05d8a347866ed43c7601ddf0eaea6cd92680010b1a7a7581107bf4e3d758","README.md":"821cac7eb5b963fc3f3fe21dd890427ab2bbf335cb25cbae89b713b3350687c5","build.rs":"f4ff15cd54890d3e3636e77a0458ba9a8882f271ccb0056a0bbae1975cdd75d5","sql/create_schema.sql":"a17311a407ec10e033886b7125da4c8b84bc6d761f6b28edc9594de430e1d964","sql/create_sync_temp_tables.sql":"860ede362c94feb47d85522553fa2852f9bdb9f9b025d6438dd5dee3d4acd527","sql/tests/create_schema_v1.sql":"77cf0c90eaac3e1aea626537147e1b8ec349b68d6076c92fa7ae402aac613050","src/api.rs":"6a3c3e09797df06165091a3d66e3830febb63bd901250f89f7d6886ba0f408cd","src/db.rs":"bd2c3788c5933a5badea30f22e13f8ca7b5b15a10b5169fa75c2204338195f4c","src/error.rs":"8587813be8e2a7f5efad4216a5c4686554ed44e98cf94bfd9c2f2c9adc8e9a11","src/ffi.rs":"f66a81393bebe7a4b7e7960cb426df106ff1f02bfebcaa6e335b4b8b56c5c936","src/lib.rs":"ab25e7c6ea67fb905fe6dad866c0d2c462b1e93bcff283db947513aeabbb2d73","src/migration.rs":"a87a9628f31a4b5a1d1610f80fb37ddb8f94c28a9455980bd5153198d1d7aa2a","src/schema.rs":"d90556dcfa5784a2d3e9432968b6eed60630e3bff5978d73abd8da2b4c71ea63","src/store.rs":"54436bda57f3f341bd50f5903c10c5dfb0dcc3cf1c5de7ca0ab36af0a27b1a1e","src/sync/bridge.rs":"f392a40b20d4f55577cc76114171e213c6ce072b5b91598e92bb858dcbdebce3","src/sync/incoming.rs":"5899c880810968e1e99f4b2de5ea1eb9d0886643eea85c851571e4ac6f2eca08","src/sync/mod.rs":"a03691d480e94a41e56355b5314f76ec75bf2cf54324ca631def8c454400affa","src/sync/outgoing.rs":"e1577ceef9d1d87c0a0985cad16bc13938dc2144dd0befeb7aca2c97375e7ee7","src/sync/sync_tests.rs":"f999b9acfb95604e4b3b981a29527a490b64e39026ea9d9668418912cda54536","src/webext-storage.udl":"0341d431ba837cf64ea210ef6157010c6664a0b5a194e89acb0414938636b391","uniffi.toml":"beeec89c2f877eb89be0090dc304dbc7c74e787385e7459bad78c6165bb66791"},"package":null}
3
third_party/rust/webext-storage/Cargo.toml
vendored
@@ -95,6 +95,3 @@ features = ["build"]

[features]
default = []

[lints.clippy]
empty-line-after-doc-comments = "allow"
5
third_party/rust/webext-storage/src/db.rs
vendored
@@ -117,10 +117,13 @@ impl ThreadSafeStorageDb {
        Arc::clone(&self.interrupt_handle)
    }

    #[allow(dead_code)]
    pub fn begin_interrupt_scope(&self) -> Result<SqlInterruptScope> {
        Ok(self.interrupt_handle.begin_interrupt_scope()?)
    }

    pub fn into_inner(self) -> StorageDb {
        self.db.into_inner()
    }
}

// Deref to a Mutex<StorageDb>, which is how we will use ThreadSafeStorageDb most of the time
8
third_party/rust/webext-storage/src/error.rs
vendored
@@ -143,11 +143,3 @@ impl From<serde_json::Error> for WebExtStorageApiError {
        }
    }
}

impl From<anyhow::Error> for WebExtStorageApiError {
    fn from(value: anyhow::Error) -> Self {
        WebExtStorageApiError::UnexpectedError {
            reason: value.to_string(),
        }
    }
}
15
third_party/rust/webext-storage/src/lib.rs
vendored
@@ -25,7 +25,6 @@ pub use api::SYNC_QUOTA_BYTES_PER_ITEM;

pub use crate::error::{QuotaReason, WebExtStorageApiError};
pub use crate::store::WebExtStorageStore;
pub use crate::sync::{bridge::WebExtStorageBridgedEngine, SyncedExtensionChange};
pub use api::UsageInfo;
pub use api::{StorageChanges, StorageValueChange};

@@ -43,17 +42,3 @@ impl UniffiCustomTypeConverter for JsonValue {
        obj.to_string()
    }
}

// Our UDL uses a `Guid` type.
use sync_guid::Guid;
impl UniffiCustomTypeConverter for Guid {
    type Builtin = String;

    fn into_custom(val: Self::Builtin) -> uniffi::Result<Guid> {
        Ok(Guid::new(val.as_str()))
    }

    fn from_custom(obj: Self) -> Self::Builtin {
        obj.into()
    }
}
40
third_party/rust/webext-storage/src/store.rs
vendored
@@ -29,7 +29,7 @@ use serde_json::Value as JsonValue;
/// connection with our sync engines - ie, these engines also hold an Arc<>
/// around the same object.
pub struct WebExtStorageStore {
    pub(crate) db: Arc<ThreadSafeStorageDb>,
    db: Arc<ThreadSafeStorageDb>,
}

impl WebExtStorageStore {

@@ -124,16 +124,46 @@ impl WebExtStorageStore {

    /// Returns the bytes in use for the specified items (which can be null,
    /// a string, or an array)
    pub fn get_bytes_in_use(&self, ext_id: &str, keys: JsonValue) -> Result<u64> {
    pub fn get_bytes_in_use(&self, ext_id: &str, keys: JsonValue) -> Result<usize> {
        let db = &self.db.lock();
        let conn = db.get_connection()?;
        Ok(api::get_bytes_in_use(conn, ext_id, keys)? as u64)
        api::get_bytes_in_use(conn, ext_id, keys)
    }

    /// Returns a bridged sync engine for Desktop for this store.
    pub fn bridged_engine(&self) -> sync::BridgedEngine {
        sync::BridgedEngine::new(&self.db)
    }

    /// Closes the store and its database connection. See the docs for
    /// `StorageDb::close` for more details on when this can fail.
    pub fn close(&self) -> Result<()> {
        let mut db = self.db.lock();
    pub fn close(self) -> Result<()> {
        // Even though this consumes `self`, the fact we use an Arc<> means
        // we can't guarantee we can actually consume the inner DB - so do
        // the best we can.
        let shared: ThreadSafeStorageDb = match Arc::into_inner(self.db) {
            Some(shared) => shared,
            _ => {
                // The only way this is possible is if the sync engine has an operation
                // running - but that shouldn't be possible in practice because desktop
                // uses a single "task queue" such that the close operation can't possibly
                // be running concurrently with any sync or storage tasks.

                // If this *could* get hit, rusqlite will attempt to close the DB connection
                // as it is dropped, and if that close fails, then rusqlite 0.28.0 and earlier
                // would panic - but even that only happens if prepared statements are
                // not finalized, which rusqlite also does.

                // tl;dr - this should be impossible. If it was possible, rusqlite might panic,
                // but we've never seen it panic in practice in other places where we don't
                // close connections, and the next rusqlite version will not panic anyway.
                // So this-is-fine.jpg
                log::warn!("Attempting to close a store while other DB references exist.");
                return Err(Error::OtherConnectionReferencesExist);
            }
        };
        // consume the mutex and get back the inner.
        let mut db = shared.into_inner();
        db.close()
    }
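close(self) above is a tidy teardown idiom: Arc::into_inner only yields the value when this was the last reference, so the store either really closes the connection or reports that another owner still holds it. A stripped-down sketch using only std and hypothetical types:

use std::sync::{Arc, Mutex};

// Hypothetical stand-in for the store's inner database handle.
struct Db {
    open: bool,
}

impl Db {
    fn close(&mut self) {
        println!("closing (was open: {})", self.open);
        self.open = false;
    }
}

fn close(shared: Arc<Mutex<Db>>) -> Result<(), &'static str> {
    // Succeeds only for the last Arc; otherwise some other owner (say, a
    // running sync engine) could still be using the connection.
    match Arc::into_inner(shared) {
        Some(mutex) => {
            // Consume the mutex to take the inner value by ownership.
            let mut db = mutex.into_inner().expect("lock poisoned");
            db.close();
            Ok(())
        }
        None => Err("other references to the DB still exist"),
    }
}

fn main() {
    let shared = Arc::new(Mutex::new(Db { open: true }));
    assert!(close(shared).is_ok());
}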
100
third_party/rust/webext-storage/src/sync/bridge.rs
vendored
@@ -5,30 +5,18 @@
use anyhow::Result;
use rusqlite::Transaction;
use std::sync::{Arc, Weak};
use sync15::bso::{IncomingBso, OutgoingBso};
use sync15::engine::{ApplyResults, BridgedEngine as Sync15BridgedEngine};
use sync15::bso::IncomingBso;
use sync15::engine::ApplyResults;
use sync_guid::Guid as SyncGuid;

use crate::db::{delete_meta, get_meta, put_meta, ThreadSafeStorageDb};
use crate::schema;
use crate::sync::incoming::{apply_actions, get_incoming, plan_incoming, stage_incoming};
use crate::sync::outgoing::{get_outgoing, record_uploaded, stage_outgoing};
use crate::WebExtStorageStore;

const LAST_SYNC_META_KEY: &str = "last_sync_time";
const SYNC_ID_META_KEY: &str = "sync_id";

impl WebExtStorageStore {
    // Returns a bridged sync engine for this store.
    pub fn bridged_engine(self: Arc<Self>) -> Arc<WebExtStorageBridgedEngine> {
        let engine = Box::new(BridgedEngine::new(&self.db));
        let bridged_engine = WebExtStorageBridgedEngine {
            bridge_impl: engine,
        };
        Arc::new(bridged_engine)
    }
}

/// A bridged engine implements all the methods needed to make the
/// `storage.sync` store work with Desktop's Sync implementation.
/// Conceptually, it's similar to `sync15::Store`, which we

@@ -66,7 +54,7 @@ impl BridgedEngine {
    }
}

impl Sync15BridgedEngine for BridgedEngine {
impl sync15::engine::BridgedEngine for BridgedEngine {
    fn last_sync(&self) -> Result<i64> {
        let shared_db = self.thread_safe_storage_db()?;
        let db = shared_db.lock();

@@ -206,88 +194,6 @@ impl Sync15BridgedEngine for BridgedEngine {
    }
}

pub struct WebExtStorageBridgedEngine {
    bridge_impl: Box<dyn Sync15BridgedEngine>,
}

impl WebExtStorageBridgedEngine {
    pub fn new(bridge_impl: Box<dyn Sync15BridgedEngine>) -> Self {
        Self { bridge_impl }
    }

    pub fn last_sync(&self) -> Result<i64> {
        self.bridge_impl.last_sync()
    }

    pub fn set_last_sync(&self, last_sync: i64) -> Result<()> {
        self.bridge_impl.set_last_sync(last_sync)
    }

    pub fn sync_id(&self) -> Result<Option<String>> {
        self.bridge_impl.sync_id()
    }

    pub fn reset_sync_id(&self) -> Result<String> {
        self.bridge_impl.reset_sync_id()
    }

    pub fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> {
        self.bridge_impl.ensure_current_sync_id(sync_id)
    }

    pub fn prepare_for_sync(&self, client_data: &str) -> Result<()> {
        self.bridge_impl.prepare_for_sync(client_data)
    }

    pub fn store_incoming(&self, incoming: Vec<String>) -> Result<()> {
        self.bridge_impl
            .store_incoming(self.convert_incoming_bsos(incoming)?)
    }

    pub fn apply(&self) -> Result<Vec<String>> {
        let apply_results = self.bridge_impl.apply()?;
        self.convert_outgoing_bsos(apply_results.records)
    }

    pub fn set_uploaded(&self, server_modified_millis: i64, guids: Vec<SyncGuid>) -> Result<()> {
        self.bridge_impl
            .set_uploaded(server_modified_millis, &guids)
    }

    pub fn sync_started(&self) -> Result<()> {
        self.bridge_impl.sync_started()
    }

    pub fn sync_finished(&self) -> Result<()> {
        self.bridge_impl.sync_finished()
    }

    pub fn reset(&self) -> Result<()> {
        self.bridge_impl.reset()
    }

    pub fn wipe(&self) -> Result<()> {
        self.bridge_impl.wipe()
    }

    fn convert_incoming_bsos(&self, incoming: Vec<String>) -> Result<Vec<IncomingBso>> {
        let mut bsos = Vec::with_capacity(incoming.len());
        for inc in incoming {
            bsos.push(serde_json::from_str::<IncomingBso>(&inc)?);
        }
        Ok(bsos)
    }

    // Encode OutgoingBso's into JSON for UniFFI
    fn convert_outgoing_bsos(&self, outgoing: Vec<OutgoingBso>) -> Result<Vec<String>> {
        let mut bsos = Vec::with_capacity(outgoing.len());
        for e in outgoing {
            bsos.push(serde_json::to_string(&e)?);
        }
        Ok(bsos)
    }
}

impl From<anyhow::Error> for crate::error::Error {
    fn from(value: anyhow::Error) -> Self {
        crate::error::Error::SyncError(value.to_string())
@@ -2,7 +2,7 @@
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

pub(crate) mod bridge;
mod bridge;
mod incoming;
mod outgoing;

@@ -17,6 +17,7 @@ use serde_derive::*;
use sql_support::ConnExt;
use sync_guid::Guid as SyncGuid;

pub use bridge::BridgedEngine;
use incoming::IncomingAction;

type JsonMap = serde_json::Map<String, serde_json::Value>;
@@ -5,9 +5,6 @@
[Custom]
typedef string JsonValue;

[Custom]
typedef string Guid;

namespace webextstorage {

};
@@ -25,11 +22,6 @@ interface WebExtStorageApiError {
    QuotaError(QuotaReason reason);
};

dictionary SyncedExtensionChange {
    string ext_id;
    string changes;
};

dictionary StorageValueChange {
    string key;
    JsonValue? old_value;
@@ -50,64 +42,9 @@ interface WebExtStorageStore {
    [Throws=WebExtStorageApiError]
    JsonValue get([ByRef] string ext_id, JsonValue keys);

    [Throws=WebExtStorageApiError]
    u64 get_bytes_in_use([ByRef] string ext_id, JsonValue keys);

    [Throws=WebExtStorageApiError]
    void close();

    [Throws=WebExtStorageApiError]
    StorageChanges remove([ByRef] string ext_id, JsonValue keys);

    [Throws=WebExtStorageApiError]
    StorageChanges clear([ByRef] string ext_id);

    [Self=ByArc]
    WebExtStorageBridgedEngine bridged_engine();

    [Throws=WebExtStorageApiError]
    sequence<SyncedExtensionChange> get_synced_changes();
};

// Note the canonical docs for this are in https://github.com/mozilla/application-services/blob/main/components/sync15/src/engine/bridged_engine.rs
// NOTE: all timestamps here are milliseconds.
interface WebExtStorageBridgedEngine {
    [Throws=WebExtStorageApiError]
    i64 last_sync();

    [Throws=WebExtStorageApiError]
    void set_last_sync(i64 last_sync);

    [Throws=WebExtStorageApiError]
    string? sync_id();

    [Throws=WebExtStorageApiError]
    string reset_sync_id();

    [Throws=WebExtStorageApiError]
    string ensure_current_sync_id([ByRef]string new_sync_id);

    [Throws=WebExtStorageApiError]
    void prepare_for_sync([ByRef]string client_data);

    [Throws=WebExtStorageApiError]
    void sync_started();

    [Throws=WebExtStorageApiError]
    void store_incoming(sequence<string> incoming);

    [Throws=WebExtStorageApiError]
    sequence<string> apply();

    [Throws=WebExtStorageApiError]
    void set_uploaded(i64 server_modified_millis, sequence<Guid> guids);

    [Throws=WebExtStorageApiError]
    void sync_finished();

    [Throws=WebExtStorageApiError]
    void reset();

    [Throws=WebExtStorageApiError]
    void wipe();
};
@@ -4,108 +4,177 @@
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

const STORAGE_SYNC_ENABLED_PREF = "webextensions.storage.sync.enabled";

import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";

const NS_ERROR_DOM_QUOTA_EXCEEDED_ERR = 0x80530016;

/** @type {Lazy} */
const lazy = {};

ChromeUtils.defineESModuleGetters(lazy, {
  ExtensionCommon: "resource://gre/modules/ExtensionCommon.sys.mjs",
  ExtensionUtils: "resource://gre/modules/ExtensionUtils.sys.mjs",
  storageSyncService:
    "resource://gre/modules/ExtensionStorageComponents.sys.mjs",
  QuotaError: "resource://gre/modules/RustWebextstorage.sys.mjs",
  // We might end up falling back to kinto...
  extensionStorageSyncKinto:
    "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs",
});

XPCOMUtils.defineLazyPreferenceGetter(
  lazy,
  "prefPermitsStorageSync",
  STORAGE_SYNC_ENABLED_PREF,
  true
);

// This xpcom service implements a "bridge" from the JS world to the Rust world.
// It sets up the database and implements a callback-based version of the
// browser.storage API.
ChromeUtils.defineLazyGetter(lazy, "storageSvc", () =>
  Cc["@mozilla.org/extensions/storage/sync;1"]
    .getService(Ci.nsIInterfaceRequestor)
    .getInterface(Ci.mozIExtensionStorageArea)
);

// The interfaces which define the callbacks used by the bridge. There's a
// callback for success, failure, and to record data changes.
function ExtensionStorageApiCallback(resolve, reject, changeCallback) {
  this.resolve = resolve;
  this.reject = reject;
  this.changeCallback = changeCallback;
}

ExtensionStorageApiCallback.prototype = {
  QueryInterface: ChromeUtils.generateQI([
    "mozIExtensionStorageListener",
    "mozIExtensionStorageCallback",
  ]),

  handleSuccess(result) {
    this.resolve(result ? JSON.parse(result) : null);
  },

  handleError(code, message) {
    /** @type {Error & { code?: number }} */
    let e = new Error(message);
    e.code = code;
    Cu.reportError(e);
    this.reject(e);
  },

  onChanged(extId, json) {
    if (this.changeCallback && json) {
      try {
        this.changeCallback(extId, JSON.parse(json));
      } catch (ex) {
        Cu.reportError(ex);
      }
    }
  },
};

// The backing implementation of the browser.storage.sync web extension API.
export class ExtensionStorageSync {
  constructor() {
    this.listeners = new Map();
    // We are optimistic :) If we ever see the special nsresult which indicates
    // migration failure, it will become false. In practice, this will only ever
    // happen on the first operation.
    this.migrationOk = true;
    this.backend = "rust";
  }

  async #getRustStore() {
    return await lazy.storageSyncService.getStorageAreaInstance();
  }

  async callRustStoreFn(fnName, extension, ...args) {
    let sargs = args.map(val => JSON.stringify(val));

    try {
      let extId = extension.id;
      let rustStore = await this.#getRustStore();
      switch (fnName) {
        case "set": {
          let changes = this._parseRustStorageValueChangeList(
            await rustStore.set(extId, ...sargs)
          );
          this.notifyListeners(extId, changes);
          return null;
        }
        case "remove": {
          let changes = this._parseRustStorageValueChangeList(
            await rustStore.remove(extId, ...sargs)
          );
          this.notifyListeners(extId, changes);
          return null;
        }
        case "clear": {
          let changes = this._parseRustStorageValueChangeList(
            await rustStore.clear(extId)
          );
          this.notifyListeners(extId, changes);
          return null;
        }
        case "get": {
          let result = await rustStore.get(extId, ...sargs);
          return JSON.parse(result);
        }
        case "getBytesInUse": {
          let result = await rustStore.getBytesInUse(extId, ...sargs);
          return JSON.parse(result);
        }
      }
    } catch (ex) {
      // The only "public" exception here is for quota failure - all others
      // are sanitized.
      let sanitized =
        ex instanceof lazy.QuotaError
          ? // The same message as the local IDB implementation
            "QuotaExceededError: storage.sync API call exceeded its quota limitations."
          : // The standard, generic extension error.
            "An unexpected error occurred";
      throw new lazy.ExtensionUtils.ExtensionError(sanitized);
  // The main entry-point to our bridge. It performs some important roles:
  // * Ensures the API is allowed to be used.
  // * Works out what "extension id" to use.
  // * Turns the callback API into a promise API.
  async _promisify(fnName, extension, context, ...args) {
    let extId = extension.id;
    if (lazy.prefPermitsStorageSync !== true) {
      throw new lazy.ExtensionUtils.ExtensionError(
        `Please set ${STORAGE_SYNC_ENABLED_PREF} to true in about:config`
      );
    }

    if (this.migrationOk) {
      // We can call ours.
      try {
        return await new Promise((resolve, reject) => {
          let callback = new ExtensionStorageApiCallback(
            resolve,
            reject,
            (extId, changes) => this.notifyListeners(extId, changes)
          );
          let sargs = args.map(val => JSON.stringify(val));
          lazy.storageSvc[fnName](extId, ...sargs, callback);
        });
      } catch (ex) {
        if (ex.code != Cr.NS_ERROR_CANNOT_CONVERT_DATA) {
          // Some non-migration related error we want to sanitize and propagate.
          // The only "public" exception here is for quota failure - all others
          // are sanitized.
          let sanitized =
            ex.code == NS_ERROR_DOM_QUOTA_EXCEEDED_ERR
              ? // The same message as the local IDB implementation
                `QuotaExceededError: storage.sync API call exceeded its quota limitations.`
              : // The standard, generic extension error.
                "An unexpected error occurred";
          throw new lazy.ExtensionUtils.ExtensionError(sanitized);
        }
        // This means "migrate failed" so we must fall back to kinto.
        Cu.reportError(
          "migration of extension-storage failed - will fall back to kinto"
        );
        this.migrationOk = false;
      }
    }
    // We've detected failure to migrate, so we want to use kinto.
    return lazy.extensionStorageSyncKinto[fnName](extension, ...args, context);
  }

  async set(extension, items) {
    return await this.callRustStoreFn("set", extension, items);
  set(extension, items, context) {
    return this._promisify("set", extension, context, items);
  }

  async remove(extension, keys) {
    return await this.callRustStoreFn("remove", extension, keys);
  remove(extension, keys, context) {
    return this._promisify("remove", extension, context, keys);
  }

  async clear(extension) {
    return await this.callRustStoreFn("clear", extension);
  clear(extension, context) {
    return this._promisify("clear", extension, context);
  }

  async clearOnUninstall(extensionId) {
  clearOnUninstall(extensionId) {
    if (!this.migrationOk) {
      // If the rust-based backend isn't being used,
      // no need to clear it.
      return;
    }
    // Resolve the returned promise once the request has been either resolved
    // or rejected (and report the error on the browser console in case of
    // unexpected clear failures on addon uninstall).
    try {
      let rustStore = await this.#getRustStore();
      await rustStore.clear(extensionId);
    } catch (err) {
      Cu.reportError(err);
    }
    return new Promise(resolve => {
      const callback = new ExtensionStorageApiCallback(
        resolve,
        err => {
          Cu.reportError(err);
          resolve();
        },
        // empty changeCallback (no need to notify the extension
        // while clearing the extension on uninstall).
        () => {}
      );
      lazy.storageSvc.clear(extensionId, callback);
    });
  }

  async get(extension, spec) {
    return await this.callRustStoreFn("get", extension, spec);
  get(extension, spec, context) {
    return this._promisify("get", extension, context, spec);
  }

  async getBytesInUse(extension, keys) {
    return await this.callRustStoreFn("getBytesInUse", extension, keys);
  getBytesInUse(extension, keys, context) {
    return this._promisify("getBytesInUse", extension, context, keys);
  }

  addOnChangedListener(extension, listener) {
@@ -122,23 +191,8 @@ export class ExtensionStorageSync {
    }
  }

  _parseRustStorageValueChangeList(changeSets) {
    let changes = {};
    for (let change of changeSets.changes) {
      changes[change.key] = {};
      if (change.oldValue) {
        changes[change.key].oldValue = JSON.parse(change.oldValue);
      }
      if (change.newValue) {
        changes[change.key].newValue = JSON.parse(change.newValue);
      }
    }
    return changes;
  }

  notifyListeners(extId, changes) {
    let listeners = this.listeners.get(extId) || new Set();

    if (listeners) {
      for (let listener of listeners) {
        lazy.ExtensionCommon.runSafeSyncWithoutClone(listener, changes);
@@ -15,6 +15,7 @@ const KINTO_PROD_SERVER_URL =
  "https://webextensions.settings.services.mozilla.com/v1";
const KINTO_DEFAULT_SERVER_URL = KINTO_PROD_SERVER_URL;

const STORAGE_SYNC_ENABLED_PREF = "webextensions.storage.sync.enabled";
const STORAGE_SYNC_SERVER_URL_PREF = "webextensions.storage.sync.serverURL";
const STORAGE_SYNC_SCOPE = "sync:addon_storage";
const STORAGE_SYNC_CRYPTO_COLLECTION_NAME = "storage-sync-crypto";
@@ -62,6 +63,12 @@ ChromeUtils.defineLazyGetter(lazy, "fxAccounts", () => {
  ).getFxAccountsSingleton();
});

XPCOMUtils.defineLazyPreferenceGetter(
  lazy,
  "prefPermitsStorageSync",
  STORAGE_SYNC_ENABLED_PREF,
  true
);
XPCOMUtils.defineLazyPreferenceGetter(
  lazy,
  "prefStorageSyncServerURL",
@@ -1196,6 +1203,11 @@ export class ExtensionStorageSyncKinto {
   * @returns {Promise<Collection>}
   */
  getCollection(extension, context) {
    if (lazy.prefPermitsStorageSync !== true) {
      return Promise.reject({
        message: `Please set ${STORAGE_SYNC_ENABLED_PREF} to true in about:config`,
      });
    }
    this.registerInUse(extension, context);
    return openCollection(extension);
  }
@@ -7,9 +7,27 @@ This document describes the implementation of the `storage.sync` part of the
<https://developer.mozilla.org/docs/Mozilla/Add-ons/WebExtensions/API/storage>`_.
The implementation lives in the `toolkit/components/extensions/storage folder <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage>`_

Ideally you would already know about Rust and XPCOM - `see this doc for more details <../../../../writing-rust-code/index.html>`_

At a very high level, the system looks like:

.. mermaid::

   graph LR
   A[Extensions API]
   A --> B[Storage JS API]
   B --> C{magic}
   C --> D[app-services component]

Where "magic" is actually the most interesting part and the primary focus of this document.

Note: The general mechanism described below is also used for other Rust components from the
app-services team - for example, "dogear" uses a similar mechanism, and the sync engines
do too (with even more complexity) to manage their threads. Unfortunately, at time of writing,
no code is shared and it's not clear how we would share it, but this might change as more Rust lands.

The app-services component `lives on github <https://github.com/mozilla/application-services/blob/main/components/webext-storage>`_.
There are docs that describe `how to update/vendor this (and all) external rust code <../../../../build/buildsystem/rust.html>`_ you might be interested in.
We use UniFFI to generate JS bindings for the components. More details about UniFFI can be found in `these docs <https://searchfox.org/mozilla-central/source/docs/writing-rust-code/uniffi.md>`_.

To set the scene, let's look at the parts exposed to WebExtensions first; there are lots of
moving parts there too.
@@ -19,7 +37,12 @@ WebExtension API

The WebExtension API is owned by the addons team. The implementation of this API is quite complex
as it involves multiple processes, but for the sake of this document, we can consider the entry-point
into the WebExtension Storage API as being `parent/ext-storage.js <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/parent/ext-storage.js>`_.
into the WebExtension Storage API as being `parent/ext-storage.js <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/parent/ext-storage.js>`_

This entry-point ends up using the implementation in the
`ExtensionStorageSync JS class <https://searchfox.org/mozilla-central/rev/9028b0458cc1f432870d2996b186b0938dda734a/toolkit/components/extensions/ExtensionStorageSync.jsm#84>`_.
This class/module has complexity for things like migration from the earlier Kinto-based backend,
but importantly, code to adapt a callback API into a promise-based one.

Overview of the API
###################
@@ -34,3 +57,171 @@ The semantics of the API are beyond this doc but are
As you will see in those docs, the API is promise-based, but the rust implementation is fully
synchronous and Rust knows nothing about Javascript promises - so this system converts
the callback-based API to a promise-based one.

xpcom as the interface to Rust
##############################

xpcom is old Mozilla technology that uses C++ "vtables" to implement "interfaces", which are
described in IDL files. While this traditionally was used to interface
C++ and Javascript, we are leveraging existing support for Rust. The interface we are
exposing is described in `mozIExtensionStorageArea.idl <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/mozIExtensionStorageArea.idl>`_

The main interface of interest in this IDL file is `mozIExtensionStorageArea`.
This interface defines the functionality - and is the first layer in the sync-to-async model.
For example, this interface defines the following method:

.. code-block:: cpp

   interface mozIExtensionStorageArea : nsISupports {
     ...
     // Sets one or more key-value pairs specified in `json` for the
     // `extensionId`...
     void set(in AUTF8String extensionId,
              in AUTF8String json,
              in mozIExtensionStorageCallback callback);

As you will notice, the 3rd arg is another interface, `mozIExtensionStorageCallback`, also
defined in that IDL file. This is a small, generic interface defined as:

.. code-block:: cpp

   interface mozIExtensionStorageCallback : nsISupports {
     // Called when the operation completes. Operations that return a result,
     // like `get`, will pass a `UTF8String` variant. Those that don't return
     // anything, like `set` or `remove`, will pass a `null` variant.
     void handleSuccess(in nsIVariant result);

     // Called when the operation fails.
     void handleError(in nsresult code, in AUTF8String message);
   };

Note that this delivers all results and errors, so must be capable of handling
every result type, which for some APIs may be problematic - but we are very lucky with this API
that this simple XPCOM callback interface is capable of reasonably representing the return types
from every function in the `mozIExtensionStorageArea` interface.

(There's another interface, `mozIExtensionStorageListener`, which is typically
also implemented by the actual callback to notify the extension about changes,
but that's beyond the scope of this doc.)

*Note the thread model here is async* - the `set` call will return immediately, and later, on
the main thread, we will call the callback param with the result of the operation.

So under the hood, what happens is something like:

.. mermaid::

   sequenceDiagram
   Extension->>ExtensionStorageSync: call `set` and give me a promise
   ExtensionStorageSync->>xpcom: call `set`, supplying new data and a callback
   ExtensionStorageSync-->>Extension: your promise
   xpcom->>xpcom: thread magic in the "bridge"
   xpcom-->>ExtensionStorageSync: callback!
   ExtensionStorageSync-->>Extension: promise resolved

So onto the thread magic in the bridge!

webext_storage_bridge
#####################

The `webext_storage_bridge <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge>`_
is a Rust crate which, as implied by the name, is a "bridge" between this Javascript/XPCOM world and
the actual `webext-storage <https://github.com/mozilla/application-services/tree/main/components/webext-storage>`_ crate.

lib.rs
------

Is the entry-point - it defines the xpcom "factory function" -
an `extern "C"` function which is called by xpcom to create the Rust object
implementing `mozIExtensionStorageArea` using existing gecko support.

area.rs
-------

This module defines the interface itself. For example, inside that file you will find:

.. code-block:: rust

   impl StorageSyncArea {
       ...

       xpcom_method!(
           set => Set(
               ext_id: *const ::nsstring::nsACString,
               json: *const ::nsstring::nsACString,
               callback: *const mozIExtensionStorageCallback
           )
       );
       /// Sets one or more key-value pairs.
       fn set(
           &self,
           ext_id: &nsACString,
           json: &nsACString,
           callback: &mozIExtensionStorageCallback,
       ) -> Result<()> {
           self.dispatch(
               Punt::Set {
                   ext_id: str::from_utf8(&*ext_id)?.into(),
                   value: serde_json::from_str(str::from_utf8(&*json)?)?,
               },
               callback,
           )?;
           Ok(())
       }


Of interest here:

* `xpcom_method` is a Rust macro, and part of the existing xpcom integration which already exists
  in gecko. It declares the xpcom vtable method described in the IDL.

* The `set` function is the implementation - it does string conversions and the JSON parsing
  on the main thread, then does the work via the supplied callback param, `self.dispatch` and a `Punt`.

* The `dispatch` method dispatches to another thread, leveraging existing in-tree `moz_task <https://searchfox.org/mozilla-central/source/xpcom/rust/moz_task>`_ support, shifting the `Punt` to another thread and making the callback when done - see the condensed sketch below.

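A condensed sketch of that `dispatch` helper, taken from `area.rs` later in this
same patch (nothing but surrounding code trimmed):

.. code-block:: rust

   /// Dispatches a task for a storage operation to the task queue.
   fn dispatch(&self, punt: Punt, callback: &mozIExtensionStorageCallback) -> Result<()> {
       let name = punt.name();
       // The task holds only a `Weak` reference to the store, which it
       // upgrades to a strong reference when it runs on the background queue.
       let task = PuntTask::new(Arc::downgrade(&*self.store()?), punt, callback)?;
       let runnable = TaskRunnable::new(name, Box::new(task))?;
       // `may_block` schedules the runnable on a dedicated I/O pool.
       TaskRunnable::dispatch_with_options(
           runnable,
           self.queue.coerce(),
           DispatchOptions::new().may_block(true),
       )?;
       Ok(())
   }
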
Punt
----

`Punt` is a whimsical name somewhat related to a "bridge" - it carries things across and back.

It is a fairly simple enum in `punt.rs <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs>`_.
It's really just a restatement of the API we expose, suitable for moving across threads. In short, the `Punt` is created on the main thread,
then sent to the background thread, where the actual operation runs via a `PuntTask` and returns a `PuntResult`.

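Its shape is roughly the sketch below - the variant names match the dispatch
calls in `area.rs`, but the payload types shown are illustrative assumptions,
not the real definitions:

.. code-block:: rust

   // A sketch only - see punt.rs for the real definition.
   pub enum Punt {
       /// Sets the key-value pairs in `value` for the extension.
       Set { ext_id: String, value: serde_json::Value },
       /// Gets the values for `keys`.
       Get { ext_id: String, keys: serde_json::Value },
       /// Removes `keys` and their values.
       Remove { ext_id: String, keys: serde_json::Value },
       /// Removes all keys and values for the extension.
       Clear { ext_id: String },
       // ...plus `GetBytesInUse`, `FetchPendingSyncChanges` and
       // `TakeMigrationInfo`, as dispatched from area.rs.
   }
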
There are a few dances that go on, but the end result is that `inner_run() <https://searchfox.org/mozilla-central/source/toolkit/components/extensions/storage/webext_storage_bridge/src/punt.rs>`_
gets executed on the background thread - so for `Set`:

.. code-block:: rust

   Punt::Set { ext_id, value } => {
       PuntResult::with_change(&ext_id, self.store()?.get()?.set(&ext_id, value)?)?
   }

Here, `self.store()` is a wrapper around the actual Rust implementation from app-services, with
various initialization and mutex dances involved - see `store.rs`.
I.e., this function is calling our Rust implementation and stashing the result in a `PuntResult`.

The `PuntResult` is private to that file but is a simple struct that encapsulates both
the actual result of the function and a set of changes to send to observers (the latter
is beyond this doc).

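As a rough mental model only - the real struct is private to `punt.rs` and the
fields below are assumptions, not the actual definition:

.. code-block:: rust

   // Illustrative sketch: captures the "result plus change notifications"
   // idea described above, not the real fields.
   struct PuntResult {
       // The function's JSON-serialized return value, later handed to
       // `mozIExtensionStorageCallback::handleSuccess` as an `nsIVariant`.
       value: Option<String>,
       // JSON change notifications destined for `storage.onChanged` listeners.
       changes: Option<String>,
   }
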
Ultimately, the `PuntResult` ends up back on the main thread once the call is complete
and arranges to call back into the JS implementation, which in turn resolves the promise created in `ExtensionStorageSync.sys.mjs`.

End result:
-----------

.. mermaid::

   sequenceDiagram
   Extension->>ExtensionStorageSync: call `set` and give me a promise
   ExtensionStorageSync->>xpcom - bridge main thread: call `set`, supplying new data and a callback
   ExtensionStorageSync-->>Extension: your promise
   xpcom - bridge main thread->>moz_task worker thread: Punt this
   moz_task worker thread->>webext-storage: write this data to the database
   webext-storage->>webext-storage: done: result/error and observers
   webext-storage-->>moz_task worker thread: ...
   moz_task worker thread-->>xpcom - bridge main thread: PuntResult
   xpcom - bridge main thread-->>ExtensionStorageSync: callback!
   ExtensionStorageSync-->>Extension: promise resolved
@@ -0,0 +1,40 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_extensions_storage_ExtensionStorageComponents_h_
#define mozilla_extensions_storage_ExtensionStorageComponents_h_

#include "mozIExtensionStorageArea.h"
#include "nsCOMPtr.h"

extern "C" {

// Implemented in Rust, in the `webext_storage_bridge` crate.
nsresult NS_NewExtensionStorageSyncArea(mozIExtensionStorageArea** aResult);

}  // extern "C"

namespace mozilla {
namespace extensions {
namespace storage {

// The C++ constructor for a `storage.sync` area. This wrapper exists because
// `components.conf` requires a component class constructor to return an
// `already_AddRefed<T>`, but Rust doesn't have such a type. So we call the
// Rust constructor using a `nsCOMPtr` (which is compatible with Rust's
// `xpcom::RefPtr`) out param, and return that.
already_AddRefed<mozIExtensionStorageArea> NewSyncArea() {
  nsCOMPtr<mozIExtensionStorageArea> storage;
  nsresult rv = NS_NewExtensionStorageSyncArea(getter_AddRefs(storage));
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return nullptr;
  }
  return storage.forget();
}

}  // namespace storage
}  // namespace extensions
}  // namespace mozilla

#endif  // mozilla_extensions_storage_ExtensionStorageComponents_h_
@@ -6,30 +6,113 @@ const lazy = {};

ChromeUtils.defineESModuleGetters(lazy, {
  AsyncShutdown: "resource://gre/modules/AsyncShutdown.sys.mjs",
  WebExtStorageStore: "resource://gre/modules/RustWebextstorage.sys.mjs",
  FileUtils: "resource://gre/modules/FileUtils.sys.mjs",
});

function StorageSyncService() {}
const StorageSyncArea = Components.Constructor(
  "@mozilla.org/extensions/storage/internal/sync-area;1",
  "mozIConfigurableExtensionStorageArea",
  "configure"
);

/**
 * An XPCOM service for the WebExtension `storage.sync` API. The service manages
 * a storage area for storing and syncing extension data.
 *
 * The service configures its storage area with the database path, and hands
 * out references to the configured area via `getInterface`. It also registers
 * a shutdown blocker to automatically tear down the area.
 *
 * ## What's the difference between `storage/internal/storage-sync-area;1` and
 * `storage/sync;1`?
 *
 * `components.conf` has two classes:
 * `@mozilla.org/extensions/storage/internal/sync-area;1` and
 * `@mozilla.org/extensions/storage/sync;1`.
 *
 * The `storage/internal/sync-area;1` class is implemented in Rust, and can be
 * instantiated using `createInstance` and `Components.Constructor`. It's not
 * a singleton, so creating a new instance will create a new `storage.sync`
 * area, with its own database connection. It's useful for testing, but not
 * meant to be used outside of this module.
 *
 * The `storage/sync;1` class is implemented in this file. It's a singleton,
 * ensuring there's only one `storage.sync` area, with one database connection.
 * The service implements `nsIInterfaceRequestor`, so callers can access the
 * storage interface like this:
 *
 *    let storageSyncArea = Cc["@mozilla.org/extensions/storage/sync;1"]
 *      .getService(Ci.nsIInterfaceRequestor)
 *      .getInterface(Ci.mozIExtensionStorageArea);
 *
 * ...And the Sync interface like this:
 *
 *    let extensionStorageEngine = Cc["@mozilla.org/extensions/storage/sync;1"]
 *      .getService(Ci.nsIInterfaceRequestor)
 *      .getInterface(Ci.mozIBridgedSyncEngine);
 *
 * @class
 */
export function StorageSyncService() {
  if (StorageSyncService._singleton) {
    return StorageSyncService._singleton;
  }

  let file = new lazy.FileUtils.File(
    PathUtils.join(PathUtils.profileDir, "storage-sync-v2.sqlite")
  );
  let kintoFile = new lazy.FileUtils.File(
    PathUtils.join(PathUtils.profileDir, "storage-sync.sqlite")
  );
  this._storageArea = new StorageSyncArea(file, kintoFile);

  // Register a blocker to close the storage connection on shutdown.
  this._shutdownBound = () => this._shutdown();
  lazy.AsyncShutdown.profileChangeTeardown.addBlocker(
    "StorageSyncService: shutdown",
    this._shutdownBound
  );

  StorageSyncService._singleton = this;
}

StorageSyncService._singleton = null;

StorageSyncService.prototype = {
  _storageAreaPromise: null,
  async getStorageAreaInstance() {
    if (!this._storageAreaPromise) {
      let path = PathUtils.join(PathUtils.profileDir, "storage-sync-v2.sqlite");
      this._storageAreaPromise = lazy.WebExtStorageStore.init(path);
  QueryInterface: ChromeUtils.generateQI(["nsIInterfaceRequestor"]),

      lazy.AsyncShutdown.profileChangeTeardown.addBlocker(
        "StorageSyncService: shutdown",
        async () => {
          let store = await this._storageAreaPromise;
          await store.close();
          this._storageAreaPromise = null;
        }
  // Returns the storage and syncing interfaces. This just hands out a
  // reference to the underlying storage area, with a quick check to make sure
  // that callers are asking for the right interfaces.
  getInterface(iid) {
    if (
      iid.equals(Ci.mozIExtensionStorageArea) ||
      iid.equals(Ci.mozIBridgedSyncEngine)
    ) {
      return this._storageArea.QueryInterface(iid);
    }
    throw Components.Exception(
      "This interface isn't implemented",
      Cr.NS_ERROR_NO_INTERFACE
    );
  },

  // Tears down the storage area and lifts the blocker so that shutdown can
  // continue.
  async _shutdown() {
    try {
      await new Promise((resolve, reject) => {
        this._storageArea.teardown({
          handleSuccess: resolve,
          handleError(code, message) {
            reject(Components.Exception(message, code));
          },
        });
      });
    } finally {
      lazy.AsyncShutdown.profileChangeTeardown.removeBlocker(
        this._shutdownBound
      );
    }

    return await this._storageAreaPromise;
  },
};

export var storageSyncService = new StorageSyncService();
22
toolkit/components/extensions/storage/components.conf
Normal file
@@ -0,0 +1,22 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

Classes = [
    {
        'cid': '{f1e424f2-67fe-4f69-a8f8-3993a71f44fa}',
        'contract_ids': ['@mozilla.org/extensions/storage/internal/sync-area;1'],
        'type': 'mozIExtensionStorageArea',
        'headers': ['mozilla/extensions/storage/ExtensionStorageComponents.h'],
        'constructor': 'mozilla::extensions::storage::NewSyncArea',
    },
    {
        'cid': '{5b7047b4-fe17-4661-8e13-871402bc2023}',
        'contract_ids': ['@mozilla.org/extensions/storage/sync;1'],
        'esModule': 'resource://gre/modules/ExtensionStorageComponents.sys.mjs',
        'constructor': 'StorageSyncService',
        'singleton': True,
    },
]
@@ -7,13 +7,27 @@
with Files("**"):
    BUG_COMPONENT = ("WebExtensions", "Storage")

XPIDL_MODULE = "webextensions-storage"

XPIDL_SOURCES += [
    "mozIExtensionStorageArea.idl",
]

# Don't build the Rust `storage.sync` bridge for GeckoView, as it will expose
# a delegate for consumers to use instead. Android Components can then provide
# an implementation of the delegate that's backed by the Rust component. For
# details, please see bug 1626506, comment 4.
if CONFIG["MOZ_WIDGET_TOOLKIT"] != "android":
    EXPORTS.mozilla.extensions.storage += [
        "ExtensionStorageComponents.h",
    ]

    EXTRA_JS_MODULES += [
        "ExtensionStorageComponents.sys.mjs",
    ]

    XPCOM_MANIFESTS += [
        "components.conf",
    ]

FINAL_LIBRARY = "xul"
@@ -0,0 +1,127 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "nsISupports.idl"

interface mozIExtensionStorageCallback;
interface nsIFile;
interface nsIVariant;

// Implements the operations needed to support the `StorageArea` WebExtension
// API.
[scriptable, uuid(d8eb3ff1-9b4b-435a-99ca-5b8cbaba2420)]
interface mozIExtensionStorageArea : nsISupports {
  // These constants are exposed by the rust crate, but it's not worth the
  // effort of jumping through the hoops to get them exposed to the JS
  // code in a sane way - so we just duplicate them here. We should consider a
  // test that checks they match the rust code.
  // This interface is agnostic WRT the area, so we prefix the constants with
  // the area - it's the consumer of this interface which knows what to use.
  const unsigned long SYNC_QUOTA_BYTES = 102400;
  const unsigned long SYNC_QUOTA_BYTES_PER_ITEM = 8192;
  const unsigned long SYNC_MAX_ITEMS = 512;

  // Sets one or more key-value pairs specified in `json` for the
  // `extensionId`. If the `callback` implements
  // `mozIExtensionStorageListener`, its `onChange`
  // method will be called with the new and old values.
  void set(in AUTF8String extensionId,
           in AUTF8String json,
           in mozIExtensionStorageCallback callback);

  // Returns the value for the `key` in the storage area for the
  // `extensionId`. `key` must be a JSON string containing either `null`,
  // an array of string key names, a single string key name, or an object
  // where the properties are the key names, and the values are the defaults
  // if the key name doesn't exist in the storage area.
  //
  // If `get()` fails due to the quota being exceeded, the exception will
  // have a result code of NS_ERROR_DOM_QUOTA_EXCEEDED_ERR (==0x80530016)
  void get(in AUTF8String extensionId,
           in AUTF8String key,
           in mozIExtensionStorageCallback callback);

  // Removes the `key` from the storage area for the `extensionId`. If `key`
  // exists and the `callback` implements `mozIExtensionStorageListener`, its
  // `onChanged` method will be called with the removed key-value pair.
  void remove(in AUTF8String extensionId,
              in AUTF8String key,
              in mozIExtensionStorageCallback callback);

  // Removes all keys from the storage area for the `extensionId`. If
  // `callback` implements `mozIExtensionStorageListener`, its `onChange`
  // method will be called with all removed key-value pairs.
  void clear(in AUTF8String extensionId,
             in mozIExtensionStorageCallback callback);

  // Gets the number of bytes in use for the specified keys.
  void getBytesInUse(in AUTF8String extensionId,
                     in AUTF8String keys,
                     in mozIExtensionStorageCallback callback);

  // Gets and clears the information about the migration from the kinto
  // database into the rust one. As "and clears" indicates, this will
  // only produce a non-empty result the first time it's called after a
  // migration (which, hopefully, should only happen once).
  void takeMigrationInfo(in mozIExtensionStorageCallback callback);
};

// Implements additional methods for setting up and tearing down the underlying
// database connection for a storage area. This is a separate interface because
// these methods are not part of the `StorageArea` API, and have restrictions on
// when they can be called.
[scriptable, uuid(2b008295-1bcc-4610-84f1-ad4cab2fa9ee)]
interface mozIConfigurableExtensionStorageArea : nsISupports {
  // Sets up the storage area. An area can only be configured once; calling
  // `configure` multiple times will throw. `configure` must also be called
  // before any of the `mozIExtensionStorageArea` methods, or they'll fail
  // with errors.
  // The second param is the path to the kinto database file from which we
  // should migrate. This should always be specified even when there's a
  // chance the file doesn't exist.
  void configure(in nsIFile databaseFile, in nsIFile kintoFile);

  // Tears down the storage area, closing the backing database connection.
  // This is called automatically when Firefox shuts down. Once a storage area
  // has been shut down, all its methods will fail with errors. If `configure`
  // hasn't been called for this area yet, `teardown` is a no-op.
  void teardown(in mozIExtensionStorageCallback callback);
};

// Implements additional methods for syncing a storage area. This is a separate
// interface because these methods are not part of the `StorageArea` API, and
// have restrictions on when they can be called.
[scriptable, uuid(6dac82c9-1d8a-4893-8c0f-6e626aef802c)]
interface mozISyncedExtensionStorageArea : nsISupports {
  // If a sync is in progress, this method fetches pending change
  // notifications for all extensions whose storage areas were updated.
  // `callback` should implement `mozIExtensionStorageListener` to forward
  // the records to `storage.onChanged` listeners. This method should only
  // be called by Sync, after `mozIBridgedSyncEngine.apply` and before
  // `syncFinished`. It fetches nothing if called at any other time.
  void fetchPendingSyncChanges(in mozIExtensionStorageCallback callback);
};

// A listener for storage area notifications.
[scriptable, uuid(8cb3c7e4-d0ca-4353-bccd-2673b4e11510)]
interface mozIExtensionStorageListener : nsISupports {
  // Notifies that an operation has data to pass to `storage.onChanged`
  // listeners for the given `extensionId`. `json` is a JSON array of listener
  // infos. If an operation affects multiple extensions, this method will be
  // called multiple times, once per extension.
  void onChanged(in AUTF8String extensionId, in AUTF8String json);
};

// A generic callback for a storage operation. Either `handleSuccess` or
// `handleError` is guaranteed to be called once.
[scriptable, uuid(870dca40-6602-4748-8493-c4253eb7f322)]
interface mozIExtensionStorageCallback : nsISupports {
  // Called when the operation completes. Operations that return a result,
  // like `get`, will pass a `UTF8String` variant. Those that don't return
  // anything, like `set` or `remove`, will pass a `null` variant.
  void handleSuccess(in nsIVariant result);

  // Called when the operation fails.
  void handleError(in nsresult code, in AUTF8String message);
};
@@ -0,0 +1,25 @@
[package]
name = "webext_storage_bridge"
description = "The WebExtension `storage.sync` bindings for Firefox"
version = "0.1.0"
authors = ["The Firefox Sync Developers <sync-team@mozilla.com>"]
edition = "2018"
license = "MPL-2.0"

[dependencies]
anyhow = "1.0"
atomic_refcell = "0.1"
cstr = "0.2"
golden_gate = { path = "../../../../../services/sync/golden_gate" }
interrupt-support = "0.1"
moz_task = { path = "../../../../../xpcom/rust/moz_task" }
nserror = { path = "../../../../../xpcom/rust/nserror" }
nsstring = { path = "../../../../../xpcom/rust/nsstring" }
once_cell = "1"
thin-vec = { version = "0.2.1", features = ["gecko-ffi"] }
xpcom = { path = "../../../../../xpcom/rust/xpcom" }
serde = "1"
serde_json = "1"
storage_variant = { path = "../../../../../storage/variant" }
sql-support = "0.1"
webext-storage = "0.1"
@@ -0,0 +1,484 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use std::{
    cell::{Ref, RefCell},
    convert::TryInto,
    ffi::OsString,
    mem,
    path::PathBuf,
    str,
    sync::Arc,
};

use golden_gate::{ApplyTask, BridgedEngine, FerryTask};
use moz_task::{DispatchOptions, TaskRunnable};
use nserror::{nsresult, NS_OK};
use nsstring::{nsACString, nsCString, nsString};
use thin_vec::ThinVec;
use webext_storage::STORAGE_VERSION;
use xpcom::{
    interfaces::{
        mozIBridgedSyncEngineApplyCallback, mozIBridgedSyncEngineCallback,
        mozIExtensionStorageCallback, mozIServicesLogSink, nsIFile, nsISerialEventTarget,
    },
    RefPtr,
};

use crate::error::{Error, Result};
use crate::punt::{Punt, PuntTask, TeardownTask};
use crate::store::{LazyStore, LazyStoreConfig};

fn path_from_nsifile(file: &nsIFile) -> Result<PathBuf> {
    let mut raw_path = nsString::new();
    // `nsIFile::GetPath` gives us a UTF-16-encoded version of its
    // native path, which we must turn back into a platform-native
    // string. We can't use `nsIFile::nativePath()` here because
    // it's marked as `nostdcall`, which Rust doesn't support.
    unsafe { file.GetPath(&mut *raw_path) }.to_result()?;
    let native_path = {
        // On Windows, we can create a native string directly from the
        // encoded path.
        #[cfg(windows)]
        {
            use std::os::windows::prelude::*;
            OsString::from_wide(&raw_path)
        }
        // On other platforms, we must first decode the raw path from
        // UTF-16, and then create our native string.
        #[cfg(not(windows))]
        OsString::from(String::from_utf16(&raw_path)?)
    };
    Ok(native_path.into())
}

/// An XPCOM component class for the Rust extension storage API. This class
/// implements the interfaces needed for syncing and storage.
///
/// This class can be created on any thread, but must not be shared between
/// threads. In Rust terms, it's `Send`, but not `Sync`.
#[xpcom(
    implement(
        mozIExtensionStorageArea,
        mozIConfigurableExtensionStorageArea,
        mozISyncedExtensionStorageArea,
        mozIInterruptible,
        mozIBridgedSyncEngine
    ),
    nonatomic
)]
pub struct StorageSyncArea {
    /// A background task queue, used to run all our storage operations on a
    /// thread pool. Using a serial event target here means that all operations
    /// will execute sequentially.
    queue: RefPtr<nsISerialEventTarget>,
    /// The store is lazily initialized on the task queue the first time it's
    /// used.
    store: RefCell<Option<Arc<LazyStore>>>,
}

/// `mozIExtensionStorageArea` implementation.
impl StorageSyncArea {
    /// Creates a storage area and its task queue.
    pub fn new() -> Result<RefPtr<StorageSyncArea>> {
        let queue = moz_task::create_background_task_queue(cstr!("StorageSyncArea"))?;
        Ok(StorageSyncArea::allocate(InitStorageSyncArea {
            queue,
            store: RefCell::new(Some(Arc::default())),
        }))
    }

    /// Returns the store for this area, or an error if it's been torn down.
    fn store(&self) -> Result<Ref<'_, Arc<LazyStore>>> {
        let maybe_store = self.store.borrow();
        if maybe_store.is_some() {
            Ok(Ref::map(maybe_store, |s| s.as_ref().unwrap()))
        } else {
            Err(Error::AlreadyTornDown)
        }
    }

    /// Dispatches a task for a storage operation to the task queue.
    fn dispatch(&self, punt: Punt, callback: &mozIExtensionStorageCallback) -> Result<()> {
        let name = punt.name();
        let task = PuntTask::new(Arc::downgrade(&*self.store()?), punt, callback)?;
        let runnable = TaskRunnable::new(name, Box::new(task))?;
        // `may_block` schedules the runnable on a dedicated I/O pool.
        TaskRunnable::dispatch_with_options(
            runnable,
            self.queue.coerce(),
            DispatchOptions::new().may_block(true),
        )?;
        Ok(())
    }

    xpcom_method!(
        configure => Configure(
            database_file: *const nsIFile,
            kinto_file: *const nsIFile
        )
    );
    /// Sets up the storage area.
    fn configure(&self, database_file: &nsIFile, kinto_file: &nsIFile) -> Result<()> {
        self.store()?.configure(LazyStoreConfig {
            path: path_from_nsifile(database_file)?,
            kinto_path: path_from_nsifile(kinto_file)?,
        })?;
        Ok(())
    }

    xpcom_method!(
        set => Set(
            ext_id: *const ::nsstring::nsACString,
            json: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Sets one or more key-value pairs.
    fn set(
        &self,
        ext_id: &nsACString,
        json: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::Set {
                ext_id: str::from_utf8(ext_id)?.into(),
                value: serde_json::from_str(str::from_utf8(json)?)?,
            },
            callback,
        )?;
        Ok(())
    }

    xpcom_method!(
        get => Get(
            ext_id: *const ::nsstring::nsACString,
            json: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Gets values for one or more keys.
    fn get(
        &self,
        ext_id: &nsACString,
        json: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::Get {
                ext_id: str::from_utf8(ext_id)?.into(),
                keys: serde_json::from_str(str::from_utf8(json)?)?,
            },
            callback,
        )
    }

    xpcom_method!(
        remove => Remove(
            ext_id: *const ::nsstring::nsACString,
            json: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Removes one or more keys and their values.
    fn remove(
        &self,
        ext_id: &nsACString,
        json: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::Remove {
                ext_id: str::from_utf8(ext_id)?.into(),
                keys: serde_json::from_str(str::from_utf8(json)?)?,
            },
            callback,
        )
    }

    xpcom_method!(
        clear => Clear(
            ext_id: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Removes all keys and values for the specified extension.
    fn clear(&self, ext_id: &nsACString, callback: &mozIExtensionStorageCallback) -> Result<()> {
        self.dispatch(
            Punt::Clear {
                ext_id: str::from_utf8(ext_id)?.into(),
            },
            callback,
        )
    }

    xpcom_method!(
        getBytesInUse => GetBytesInUse(
            ext_id: *const ::nsstring::nsACString,
            keys: *const ::nsstring::nsACString,
            callback: *const mozIExtensionStorageCallback
        )
    );
    /// Obtains the count of bytes in use for the specified key or for all keys.
    fn getBytesInUse(
        &self,
        ext_id: &nsACString,
        keys: &nsACString,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<()> {
        self.dispatch(
            Punt::GetBytesInUse {
                ext_id: str::from_utf8(ext_id)?.into(),
                keys: serde_json::from_str(str::from_utf8(keys)?)?,
            },
            callback,
        )
    }

    xpcom_method!(teardown => Teardown(callback: *const mozIExtensionStorageCallback));
    /// Tears down the storage area, closing the backing database connection.
    fn teardown(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
        // Each storage task holds a `Weak` reference to the store, which it
        // upgrades to an `Arc` (strong reference) when the task runs on the
        // background queue. The strong reference is dropped when the task
        // finishes. When we tear down the storage area, we relinquish our one
        // owned strong reference to the `TeardownTask`. Because we're using a
        // task queue, when the `TeardownTask` runs, it should have the only
        // strong reference to the store, since all other tasks that called
        // `Weak::upgrade` will have already finished. The `TeardownTask` can
        // then consume the `Arc` and destroy the store.
        let mut maybe_store = self.store.borrow_mut();
        match mem::take(&mut *maybe_store) {
            Some(store) => {
                // Interrupt any currently-running statements.
                store.interrupt();
                // If dispatching the runnable fails, we'll leak the store
                // without closing its database connection.
                teardown(&self.queue, store, callback)?;
            }
            None => return Err(Error::AlreadyTornDown),
        }
        Ok(())
    }

    xpcom_method!(takeMigrationInfo => TakeMigrationInfo(callback: *const mozIExtensionStorageCallback));

    /// Fetch-and-delete (i.e. `take`) information about the migration from the
    /// kinto-based extension-storage to the rust-based storage.
    fn takeMigrationInfo(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
        self.dispatch(Punt::TakeMigrationInfo, callback)
    }
}

fn teardown(
    queue: &nsISerialEventTarget,
    store: Arc<LazyStore>,
    callback: &mozIExtensionStorageCallback,
) -> Result<()> {
    let task = TeardownTask::new(store, callback)?;
    let runnable = TaskRunnable::new(TeardownTask::name(), Box::new(task))?;
    TaskRunnable::dispatch_with_options(
        runnable,
        queue.coerce(),
        DispatchOptions::new().may_block(true),
    )?;
    Ok(())
}

/// `mozISyncedExtensionStorageArea` implementation.
impl StorageSyncArea {
    xpcom_method!(
        fetch_pending_sync_changes => FetchPendingSyncChanges(callback: *const mozIExtensionStorageCallback)
    );
    fn fetch_pending_sync_changes(&self, callback: &mozIExtensionStorageCallback) -> Result<()> {
        self.dispatch(Punt::FetchPendingSyncChanges, callback)
    }
}

/// `mozIInterruptible` implementation.
impl StorageSyncArea {
    xpcom_method!(
        interrupt => Interrupt()
    );
    /// Interrupts any operations currently running on the background task
    /// queue.
    fn interrupt(&self) -> Result<()> {
        self.store()?.interrupt();
        Ok(())
    }
}

/// `mozIBridgedSyncEngine` implementation.
impl StorageSyncArea {
    xpcom_method!(get_logger => GetLogger() -> *const mozIServicesLogSink);
    fn get_logger(&self) -> Result<RefPtr<mozIServicesLogSink>> {
        Err(NS_OK)?
    }

    xpcom_method!(set_logger => SetLogger(logger: *const mozIServicesLogSink));
    fn set_logger(&self, _logger: Option<&mozIServicesLogSink>) -> Result<()> {
        Ok(())
    }

    xpcom_method!(get_storage_version => GetStorageVersion() -> i32);
    fn get_storage_version(&self) -> Result<i32> {
        Ok(STORAGE_VERSION.try_into().unwrap())
    }

    // It's possible that migration, or even merging, will result in records
    // too large for the server. We tolerate that (and hope that the addons do
    // too :)
    xpcom_method!(get_allow_skipped_record => GetAllowSkippedRecord() -> bool);
    fn get_allow_skipped_record(&self) -> Result<bool> {
        Ok(true)
    }

    xpcom_method!(
        get_last_sync => GetLastSync(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn get_last_sync(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_last_sync(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }

    xpcom_method!(
        set_last_sync => SetLastSync(
            last_sync_millis: i64,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn set_last_sync(
        &self,
        last_sync_millis: i64,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(
            FerryTask::for_set_last_sync(self.new_bridge()?, last_sync_millis, callback)?
                .dispatch(&self.queue)?,
        )
    }

    xpcom_method!(
        get_sync_id => GetSyncId(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn get_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }

    xpcom_method!(
        reset_sync_id => ResetSyncId(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn reset_sync_id(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_reset_sync_id(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }

    xpcom_method!(
        ensure_current_sync_id => EnsureCurrentSyncId(
            new_sync_id: *const nsACString,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn ensure_current_sync_id(
        &self,
        new_sync_id: &nsACString,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(
            FerryTask::for_ensure_current_sync_id(self.new_bridge()?, new_sync_id, callback)?
                .dispatch(&self.queue)?,
        )
    }

    xpcom_method!(
        sync_started => SyncStarted(
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn sync_started(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
        Ok(FerryTask::for_sync_started(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }

    xpcom_method!(
        store_incoming => StoreIncoming(
            incoming_envelopes_json: *const ThinVec<::nsstring::nsCString>,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn store_incoming(
        &self,
        incoming_envelopes_json: Option<&ThinVec<nsCString>>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(FerryTask::for_store_incoming(
            self.new_bridge()?,
            incoming_envelopes_json.map(|v| v.as_slice()).unwrap_or(&[]),
            callback,
        )?
        .dispatch(&self.queue)?)
    }

    xpcom_method!(apply => Apply(callback: *const mozIBridgedSyncEngineApplyCallback));
    fn apply(&self, callback: &mozIBridgedSyncEngineApplyCallback) -> Result<()> {
        Ok(ApplyTask::new(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
    }

    xpcom_method!(
        set_uploaded => SetUploaded(
            server_modified_millis: i64,
            uploaded_ids: *const ThinVec<::nsstring::nsCString>,
            callback: *const mozIBridgedSyncEngineCallback
        )
    );
    fn set_uploaded(
        &self,
        server_modified_millis: i64,
        uploaded_ids: Option<&ThinVec<nsCString>>,
        callback: &mozIBridgedSyncEngineCallback,
    ) -> Result<()> {
        Ok(FerryTask::for_set_uploaded(
            self.new_bridge()?,
|
||||
server_modified_millis,
|
||||
uploaded_ids.map(|v| v.as_slice()).unwrap_or(&[]),
|
||||
callback,
|
||||
)?
|
||||
.dispatch(&self.queue)?)
|
||||
}
|
||||
|
||||
xpcom_method!(
|
||||
sync_finished => SyncFinished(
|
||||
callback: *const mozIBridgedSyncEngineCallback
|
||||
)
|
||||
);
|
||||
fn sync_finished(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
|
||||
Ok(FerryTask::for_sync_finished(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
|
||||
}
|
||||
|
||||
xpcom_method!(
|
||||
reset => Reset(
|
||||
callback: *const mozIBridgedSyncEngineCallback
|
||||
)
|
||||
);
|
||||
fn reset(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
|
||||
Ok(FerryTask::for_reset(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
|
||||
}
|
||||
|
||||
xpcom_method!(
|
||||
wipe => Wipe(
|
||||
callback: *const mozIBridgedSyncEngineCallback
|
||||
)
|
||||
);
|
||||
fn wipe(&self, callback: &mozIBridgedSyncEngineCallback) -> Result<()> {
|
||||
Ok(FerryTask::for_wipe(self.new_bridge()?, callback)?.dispatch(&self.queue)?)
|
||||
}
|
||||
|
||||
fn new_bridge(&self) -> Result<Box<dyn BridgedEngine>> {
|
||||
Ok(Box::new(self.store()?.get()?.bridged_engine()))
|
||||
}
|
||||
}
|
||||
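The ownership choreography spelled out in these comments is the heart of safe teardown: every storage task holds only a `Weak` reference, and the single strong `Arc` is handed to the teardown step, which can then reclaim the store with `Arc::try_unwrap`. A minimal, std-only sketch of the same pattern (the `Store` type and error strings are stand-ins, not the crate's real API):

```rust
use std::sync::{Arc, Weak};

struct Store; // stand-in for `LazyStore`

impl Store {
    fn close(self) {
        println!("store closed");
    }
}

fn worker(store: &Weak<Store>) -> Result<(), &'static str> {
    // Workers upgrade on demand; once teardown has run, they fail fast.
    let strong: Arc<Store> = store.upgrade().ok_or("already torn down")?;
    // ... use `strong`, then drop it when the task finishes ...
    drop(strong);
    Ok(())
}

fn teardown(store: Arc<Store>) -> Result<(), &'static str> {
    // Succeeds only if this is the last strong reference, mirroring the
    // invariant the serial task queue guarantees above.
    match Arc::try_unwrap(store) {
        Ok(store) => {
            store.close();
            Ok(())
        }
        Err(_leaked) => Err("someone else still holds the store"),
    }
}

fn main() {
    let store = Arc::new(Store);
    let weak = Arc::downgrade(&store);
    worker(&weak).unwrap();
    teardown(store).unwrap();
    assert!(worker(&weak).is_err()); // upgrades fail after teardown
}
```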
@@ -0,0 +1,124 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use std::{error, fmt, result, str::Utf8Error, string::FromUtf16Error};

use golden_gate::Error as GoldenGateError;
use nserror::{
    nsresult, NS_ERROR_ALREADY_INITIALIZED, NS_ERROR_CANNOT_CONVERT_DATA,
    NS_ERROR_DOM_QUOTA_EXCEEDED_ERR, NS_ERROR_FAILURE, NS_ERROR_INVALID_ARG,
    NS_ERROR_NOT_IMPLEMENTED, NS_ERROR_NOT_INITIALIZED, NS_ERROR_UNEXPECTED,
};
use serde_json::error::Error as JsonError;
use webext_storage::error::Error as WebextStorageError;

/// A specialized `Result` type for extension storage operations.
pub type Result<T> = result::Result<T, Error>;

/// The error type for extension storage operations. Errors can be converted
/// into `nsresult` codes, and include more detailed messages that can be passed
/// to callbacks.
#[derive(Debug)]
pub enum Error {
    Nsresult(nsresult),
    WebextStorage(WebextStorageError),
    MigrationFailed(WebextStorageError),
    GoldenGate(GoldenGateError),
    MalformedString(Box<dyn error::Error + Send + Sync + 'static>),
    AlreadyConfigured,
    NotConfigured,
    AlreadyRan(&'static str),
    DidNotRun(&'static str),
    AlreadyTornDown,
    NotImplemented,
}

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::MalformedString(error) => Some(error.as_ref()),
            _ => None,
        }
    }
}

impl From<nsresult> for Error {
    fn from(result: nsresult) -> Error {
        Error::Nsresult(result)
    }
}

impl From<WebextStorageError> for Error {
    fn from(error: WebextStorageError) -> Error {
        Error::WebextStorage(error)
    }
}

impl From<GoldenGateError> for Error {
    fn from(error: GoldenGateError) -> Error {
        Error::GoldenGate(error)
    }
}

impl From<Utf8Error> for Error {
    fn from(error: Utf8Error) -> Error {
        Error::MalformedString(error.into())
    }
}

impl From<FromUtf16Error> for Error {
    fn from(error: FromUtf16Error) -> Error {
        Error::MalformedString(error.into())
    }
}

impl From<JsonError> for Error {
    fn from(error: JsonError) -> Error {
        Error::MalformedString(error.into())
    }
}

impl From<Error> for nsresult {
    fn from(error: Error) -> nsresult {
        match error {
            Error::Nsresult(result) => result,
            Error::WebextStorage(e) => match e {
                WebextStorageError::QuotaError(_) => NS_ERROR_DOM_QUOTA_EXCEEDED_ERR,
                _ => NS_ERROR_FAILURE,
            },
            Error::MigrationFailed(_) => NS_ERROR_CANNOT_CONVERT_DATA,
            Error::GoldenGate(error) => error.into(),
            Error::MalformedString(_) => NS_ERROR_INVALID_ARG,
            Error::AlreadyConfigured => NS_ERROR_ALREADY_INITIALIZED,
            Error::NotConfigured => NS_ERROR_NOT_INITIALIZED,
            Error::AlreadyRan(_) => NS_ERROR_UNEXPECTED,
            Error::DidNotRun(_) => NS_ERROR_UNEXPECTED,
            Error::AlreadyTornDown => NS_ERROR_UNEXPECTED,
            Error::NotImplemented => NS_ERROR_NOT_IMPLEMENTED,
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::Nsresult(result) => write!(f, "Operation failed with {result}"),
            Error::WebextStorage(error) => error.fmt(f),
            Error::MigrationFailed(error) => write!(f, "Migration failed with {error}"),
            Error::GoldenGate(error) => error.fmt(f),
            Error::MalformedString(error) => error.fmt(f),
            Error::AlreadyConfigured => write!(f, "The storage area is already configured"),
            Error::NotConfigured => write!(
                f,
                "The storage area must be configured by calling `configure` first"
            ),
            Error::AlreadyRan(what) => write!(f, "`{what}` already ran on the background thread"),
            Error::DidNotRun(what) => write!(f, "`{what}` didn't run on the background thread"),
            Error::AlreadyTornDown => {
                write!(f, "Can't use a storage area that's already torn down")
            }
            Error::NotImplemented => write!(f, "Operation not implemented"),
        }
    }
}
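The shape of this module is one conversion funnel: each layer's error type folds into `Error` via a `From` impl (so `?` just works), and the XPCOM boundary collapses the whole thing into a coarse status code plus a display string for the callback. A condensed, self-contained sketch of that flow, with a plain integer newtype standing in for `nsresult` and only one source error:

```rust
use std::fmt;

#[derive(Debug, Clone, Copy, PartialEq)]
struct Status(u32); // stand-in for `nsresult`
const ERR_INVALID_ARG: Status = Status(0x8007_0057);
const ERR_UNEXPECTED: Status = Status(0x8000_ffff);

#[derive(Debug)]
enum Error {
    MalformedString(std::str::Utf8Error),
    AlreadyTornDown,
}

impl From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Error {
        Error::MalformedString(e)
    }
}

impl From<Error> for Status {
    fn from(e: Error) -> Status {
        match e {
            Error::MalformedString(_) => ERR_INVALID_ARG,
            Error::AlreadyTornDown => ERR_UNEXPECTED,
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::MalformedString(e) => e.fmt(f),
            Error::AlreadyTornDown => write!(f, "already torn down"),
        }
    }
}

fn parse(bytes: &[u8]) -> Result<String, Error> {
    // `?` converts `Utf8Error` into `Error` through the `From` impl.
    Ok(std::str::from_utf8(bytes)?.to_string())
}

fn main() {
    match parse(&[0xff]) {
        Ok(_) => unreachable!(),
        Err(err) => {
            let message = err.to_string(); // detailed message for the callback
            let code: Status = err.into(); // coarse status for the FFI caller
            assert_eq!(code, ERR_INVALID_ARG);
            assert!(!message.is_empty());
        }
    }
}
```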
@@ -0,0 +1,65 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#![allow(non_snake_case)]

//! This crate bridges the WebExtension storage area interfaces in Firefox
//! Desktop to the extension storage Rust component in Application Services.
//!
//! ## How are the WebExtension storage APIs implemented in Firefox?
//!
//! There are three storage APIs available for WebExtensions:
//! `storage.local`, which is stored locally in an IndexedDB database and never
//! synced to other devices, `storage.sync`, which is stored in a local SQLite
//! database and synced to all devices signed in to the same Firefox Account,
//! and `storage.managed`, which is provisioned in a native manifest and
//! read-only.
//!
//! * `storage.local` is implemented in `ExtensionStorageIDB.sys.mjs`.
//! * `storage.sync` is implemented in a Rust component, `webext_storage`. This
//!   Rust component is vendored in m-c, and exposed to JavaScript via an XPCOM
//!   API in `webext_storage_bridge` (this crate). Eventually, we'll change
//!   `ExtensionStorageSync.sys.mjs` to call the XPCOM API instead of using the
//!   old Kinto storage adapter.
//! * `storage.managed` is implemented directly in `parent/ext-storage.js`.
//!
//! `webext_storage_bridge` implements the `mozIExtensionStorageArea`
//! (and, eventually, `mozIBridgedSyncEngine`) interface for `storage.sync`. The
//! implementation is in `area::StorageSyncArea`, and is backed by the
//! `webext_storage` component.

#[macro_use]
extern crate cstr;
#[macro_use]
extern crate xpcom;

mod area;
mod error;
mod punt;
mod store;

use nserror::{nsresult, NS_OK};
use xpcom::{interfaces::mozIExtensionStorageArea, RefPtr};

use crate::area::StorageSyncArea;

/// The constructor for a `storage.sync` area. This uses C linkage so that it
/// can be called from C++. See `ExtensionStorageComponents.h` for the C++
/// constructor that's passed to the component manager.
///
/// # Safety
///
/// This function is unsafe because it dereferences `result`.
#[no_mangle]
pub unsafe extern "C" fn NS_NewExtensionStorageSyncArea(
    result: *mut *const mozIExtensionStorageArea,
) -> nsresult {
    match StorageSyncArea::new() {
        Ok(bridge) => {
            RefPtr::new(bridge.coerce::<mozIExtensionStorageArea>()).forget(&mut *result);
            NS_OK
        }
        Err(err) => err.into(),
    }
}
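`NS_NewExtensionStorageSyncArea` follows the classic XPCOM factory shape: a `#[no_mangle] extern "C"` function that writes an owned, refcounted pointer through an out-parameter and reports success or failure as a status code. A toy version of that contract, using `Box::into_raw` in place of `RefPtr::forget` and bare integers for `nsresult` (all names here are illustrative only):

```rust
const OK: u32 = 0; // toy status code standing in for NS_OK

struct Area {
    name: &'static str,
}

impl Area {
    fn new() -> Result<Area, u32> {
        Ok(Area { name: "storage.sync" })
    }
}

/// # Safety
/// `result` must be a valid, writable pointer. Ownership of the boxed
/// `Area` transfers to the caller, who must eventually free it.
#[no_mangle]
pub unsafe extern "C" fn NewArea(result: *mut *mut Area) -> u32 {
    match Area::new() {
        Ok(area) => {
            // `Box::into_raw` is the moral equivalent of `RefPtr::forget`:
            // give up ownership without running the destructor.
            *result = Box::into_raw(Box::new(area));
            OK
        }
        Err(code) => code,
    }
}

fn main() {
    let mut out: *mut Area = std::ptr::null_mut();
    let rv = unsafe { NewArea(&mut out) };
    assert_eq!(rv, OK);
    // Reclaim ownership so the toy example doesn't leak.
    let area = unsafe { Box::from_raw(out) };
    assert_eq!(area.name, "storage.sync");
}
```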
@@ -0,0 +1,321 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use std::{
    borrow::Borrow,
    fmt::Write,
    mem, result, str,
    sync::{Arc, Weak},
};

use atomic_refcell::AtomicRefCell;
use moz_task::{Task, ThreadPtrHandle, ThreadPtrHolder};
use nserror::nsresult;
use nsstring::nsCString;
use serde::Serialize;
use serde_json::Value as JsonValue;
use storage_variant::VariantType;
use xpcom::{
    interfaces::{mozIExtensionStorageCallback, mozIExtensionStorageListener},
    RefPtr, XpCom,
};

use crate::error::{Error, Result};
use crate::store::LazyStore;

/// A storage operation that's punted from the main thread to the background
/// task queue.
pub enum Punt {
    /// Get the values of the keys for an extension.
    Get { ext_id: String, keys: JsonValue },
    /// Set a key-value pair for an extension.
    Set { ext_id: String, value: JsonValue },
    /// Remove one or more keys for an extension.
    Remove { ext_id: String, keys: JsonValue },
    /// Clear all keys and values for an extension.
    Clear { ext_id: String },
    /// Returns the bytes in use for the specified, or all, keys.
    GetBytesInUse { ext_id: String, keys: JsonValue },
    /// Fetches all pending Sync change notifications to pass to
    /// `storage.onChanged` listeners.
    FetchPendingSyncChanges,
    /// Fetch-and-delete (e.g. `take`) information about the migration from the
    /// kinto-based extension-storage to the rust-based storage.
    ///
    /// This data is stored in the database instead of just being returned by
    /// the call to `migrate`, as we may migrate prior to telemetry being ready.
    TakeMigrationInfo,
}

impl Punt {
    /// Returns the operation name, used to label the task runnable and report
    /// errors.
    pub fn name(&self) -> &'static str {
        match self {
            Punt::Get { .. } => "webext_storage::get",
            Punt::Set { .. } => "webext_storage::set",
            Punt::Remove { .. } => "webext_storage::remove",
            Punt::Clear { .. } => "webext_storage::clear",
            Punt::GetBytesInUse { .. } => "webext_storage::get_bytes_in_use",
            Punt::FetchPendingSyncChanges => "webext_storage::fetch_pending_sync_changes",
            Punt::TakeMigrationInfo => "webext_storage::take_migration_info",
        }
    }
}

/// A storage operation result, punted from the background queue back to the
/// main thread.
#[derive(Default)]
struct PuntResult {
    changes: Vec<Change>,
    value: Option<String>,
}

/// A change record for an extension.
struct Change {
    ext_id: String,
    json: String,
}

impl PuntResult {
    /// Creates a result with a single change to pass to `onChanged`, and no
    /// return value for `handleSuccess`. The `Borrow` bound lets this method
    /// take either a borrowed reference or an owned value.
    fn with_change<T: Borrow<S>, S: Serialize>(ext_id: &str, changes: T) -> Result<Self> {
        Ok(PuntResult {
            changes: vec![Change {
                ext_id: ext_id.into(),
                json: serde_json::to_string(changes.borrow())?,
            }],
            value: None,
        })
    }

    /// Creates a result with changes for multiple extensions to pass to
    /// `onChanged`, and no return value for `handleSuccess`.
    fn with_changes(changes: Vec<Change>) -> Self {
        PuntResult {
            changes,
            value: None,
        }
    }

    /// Creates a result with no changes to pass to `onChanged`, and a return
    /// value for `handleSuccess`.
    fn with_value<T: Borrow<S>, S: Serialize>(value: T) -> Result<Self> {
        Ok(PuntResult {
            changes: Vec::new(),
            value: Some(serde_json::to_string(value.borrow())?),
        })
    }
}

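The `T: Borrow<S>, S: Serialize` bound on `with_change` and `with_value` is what lets one signature take either an owned value or a reference: `value.borrow()` lands on `&S` in both cases, and `serde_json::to_string` does the rest. A standalone sketch of the trick (assuming `serde` with the `derive` feature and `serde_json` as dependencies):

```rust
use std::borrow::Borrow;

use serde::Serialize;

/// Serializes either an owned value or a reference: `Borrow` unifies both.
fn to_json<T: Borrow<S>, S: Serialize>(value: T) -> serde_json::Result<String> {
    serde_json::to_string(value.borrow())
}

#[derive(Serialize)]
struct Change {
    key: String,
    new_value: String,
}

fn main() -> serde_json::Result<()> {
    let change = Change {
        key: "hi".into(),
        new_value: "hello".into(),
    };
    // By reference (the turbofish pins `S`, since `&Change` borrows as both
    // `Change` and `&Change`)...
    println!("{}", to_json::<_, Change>(&change)?);
    // ...and by value, where `S` is inferred.
    println!("{}", to_json(change)?);
    Ok(())
}
```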
/// A generic task used for all storage operations. Punts the operation to the
/// background task queue, receives a result back on the main thread, and calls
/// the callback with it.
pub struct PuntTask {
    name: &'static str,
    /// Storage tasks hold weak references to the store, which they upgrade
    /// to strong references when running on the background queue. This
    /// ensures that pending storage tasks don't block teardown (for example,
    /// if a consumer calls `get` and then `teardown`, without waiting for
    /// `get` to finish).
    store: Weak<LazyStore>,
    punt: AtomicRefCell<Option<Punt>>,
    callback: ThreadPtrHandle<mozIExtensionStorageCallback>,
    result: AtomicRefCell<Result<PuntResult>>,
}

impl PuntTask {
    /// Creates a storage task that punts an operation to the background queue.
    /// Returns an error if the task couldn't be created because the thread
    /// manager is shutting down.
    pub fn new(
        store: Weak<LazyStore>,
        punt: Punt,
        callback: &mozIExtensionStorageCallback,
    ) -> Result<Self> {
        let name = punt.name();
        Ok(Self {
            name,
            store,
            punt: AtomicRefCell::new(Some(punt)),
            callback: ThreadPtrHolder::new(
                cstr!("mozIExtensionStorageCallback"),
                RefPtr::new(callback),
            )?,
            result: AtomicRefCell::new(Err(Error::DidNotRun(name))),
        })
    }

    /// Upgrades the task's weak `LazyStore` reference to a strong one. Returns
    /// an error if the store has been torn down.
    ///
    /// It's important that this is called on the background queue, after the
    /// task has been dispatched. Storage tasks shouldn't hold strong references
    /// to the store on the main thread, because then they might block teardown.
    fn store(&self) -> Result<Arc<LazyStore>> {
        match self.store.upgrade() {
            Some(store) => Ok(store),
            None => Err(Error::AlreadyTornDown),
        }
    }

    /// Runs this task's storage operation on the background queue.
    fn inner_run(&self, punt: Punt) -> Result<PuntResult> {
        Ok(match punt {
            Punt::Set { ext_id, value } => {
                PuntResult::with_change(&ext_id, self.store()?.get()?.set(&ext_id, value)?)?
            }
            Punt::Get { ext_id, keys } => {
                PuntResult::with_value(self.store()?.get()?.get(&ext_id, keys)?)?
            }
            Punt::Remove { ext_id, keys } => {
                PuntResult::with_change(&ext_id, self.store()?.get()?.remove(&ext_id, keys)?)?
            }
            Punt::Clear { ext_id } => {
                PuntResult::with_change(&ext_id, self.store()?.get()?.clear(&ext_id)?)?
            }
            Punt::GetBytesInUse { ext_id, keys } => {
                PuntResult::with_value(self.store()?.get()?.get_bytes_in_use(&ext_id, keys)?)?
            }
            Punt::FetchPendingSyncChanges => PuntResult::with_changes(
                self.store()?
                    .get()?
                    .get_synced_changes()?
                    .into_iter()
                    .map(|info| Change {
                        ext_id: info.ext_id,
                        json: info.changes,
                    })
                    .collect(),
            ),
            Punt::TakeMigrationInfo => {
                PuntResult::with_value(self.store()?.get()?.take_migration_info()?)?
            }
        })
    }
}

impl Task for PuntTask {
    fn run(&self) {
        *self.result.borrow_mut() = match self.punt.borrow_mut().take() {
            Some(punt) => self.inner_run(punt),
            // A task should never run on the background queue twice, but we
            // return an error just in case.
            None => Err(Error::AlreadyRan(self.name)),
        };
    }

    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        // As above, `done` should never be called multiple times, but we handle
        // that by returning an error.
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::AlreadyRan(self.name)),
        ) {
            Ok(PuntResult { changes, value }) => {
                // If we have change data, and the callback implements the
                // listener interface, notify about it first.
                if let Some(listener) = callback.query_interface::<mozIExtensionStorageListener>() {
                    for Change { ext_id, json } in changes {
                        // Ignore errors.
                        let _ = unsafe {
                            listener.OnChanged(&*nsCString::from(ext_id), &*nsCString::from(json))
                        };
                    }
                }
                let result = value.map(nsCString::from).into_variant();
                unsafe { callback.HandleSuccess(result.coerce()) }
            }
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(err.into(), &*message) }
            }
        }
        .to_result()
    }
}

/// A task to tear down the store on the background task queue.
pub struct TeardownTask {
    /// Unlike storage tasks, the teardown task holds a strong reference to
    /// the store, which it drops on the background queue. This is the only
    /// task that should do that.
    store: AtomicRefCell<Option<Arc<LazyStore>>>,
    callback: ThreadPtrHandle<mozIExtensionStorageCallback>,
    result: AtomicRefCell<Result<()>>,
}

impl TeardownTask {
    /// Creates a teardown task. This should only be created and dispatched
    /// once, to clean up the store at shutdown. Returns an error if the task
    /// couldn't be created because the thread manager is shutting down.
    pub fn new(store: Arc<LazyStore>, callback: &mozIExtensionStorageCallback) -> Result<Self> {
        Ok(Self {
            store: AtomicRefCell::new(Some(store)),
            callback: ThreadPtrHolder::new(
                cstr!("mozIExtensionStorageCallback"),
                RefPtr::new(callback),
            )?,
            result: AtomicRefCell::new(Err(Error::DidNotRun(Self::name()))),
        })
    }

    /// Returns the task name, used to label its runnable and report errors.
    pub fn name() -> &'static str {
        "webext_storage::teardown"
    }

    /// Tears down and drops the store on the background queue.
    fn inner_run(&self, store: Arc<LazyStore>) -> Result<()> {
        // At this point, we should be holding the only strong reference
        // to the store, since 1) `StorageSyncArea` gave its one strong
        // reference to our task, and 2) we're running on a background
        // task queue, which runs all tasks sequentially...so no other
        // `PuntTask`s should be running and trying to upgrade their
        // weak references. So we can unwrap the `Arc` and take ownership
        // of the store.
        match Arc::try_unwrap(store) {
            Ok(store) => store.teardown(),
            Err(_) => {
                // If unwrapping the `Arc` fails, someone else must have
                // a strong reference to the store. We could sleep and
                // try again, but this is so unexpected that it's easier
                // to just leak the store, and return an error to the
                // callback. Except in tests, we only call `teardown` at
                // shutdown, so the resources will get reclaimed soon,
                // anyway.
                Err(Error::DidNotRun(Self::name()))
            }
        }
    }
}

impl Task for TeardownTask {
    fn run(&self) {
        *self.result.borrow_mut() = match self.store.borrow_mut().take() {
            Some(store) => self.inner_run(store),
            None => Err(Error::AlreadyRan(Self::name())),
        };
    }

    fn done(&self) -> result::Result<(), nsresult> {
        let callback = self.callback.get().unwrap();
        match mem::replace(
            &mut *self.result.borrow_mut(),
            Err(Error::AlreadyRan(Self::name())),
        ) {
            Ok(()) => unsafe { callback.HandleSuccess(().into_variant().coerce()) },
            Err(err) => {
                let mut message = nsCString::new();
                write!(message, "{err}").unwrap();
                unsafe { callback.HandleError(err.into(), &*message) }
            }
        }
        .to_result()
    }
}
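Both task types follow the same take-once discipline: the input sits in a cell as an `Option` that `run` takes (so a second run finds `None` and records `AlreadyRan`), and `done` swaps the result out, leaving an `AlreadyRan` sentinel behind. A single-threaded sketch with `RefCell` (the real tasks use `AtomicRefCell` because `run` and `done` execute on different threads):

```rust
use std::cell::RefCell;
use std::mem;

#[derive(Debug, PartialEq)]
enum Error {
    AlreadyRan,
    DidNotRun,
}

struct Task {
    input: RefCell<Option<i32>>,
    result: RefCell<Result<i32, Error>>,
}

impl Task {
    fn new(input: i32) -> Task {
        Task {
            input: RefCell::new(Some(input)),
            // Seeded with `DidNotRun` so a skipped `run` is still reported.
            result: RefCell::new(Err(Error::DidNotRun)),
        }
    }

    fn run(&self) {
        *self.result.borrow_mut() = match self.input.borrow_mut().take() {
            Some(n) => Ok(n * 2),
            None => Err(Error::AlreadyRan), // a second run takes nothing
        };
    }

    fn done(&self) -> Result<i32, Error> {
        // Swap the result out, leaving a sentinel for any later `done` call.
        mem::replace(&mut *self.result.borrow_mut(), Err(Error::AlreadyRan))
    }
}

fn main() {
    let task = Task::new(21);
    task.run();
    assert_eq!(task.done(), Ok(42));
    assert_eq!(task.done(), Err(Error::AlreadyRan)); // result already consumed
}
```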
@@ -0,0 +1,136 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use std::{fs::remove_file, path::PathBuf, sync::Arc};

use interrupt_support::SqlInterruptHandle;
use once_cell::sync::OnceCell;
use webext_storage::store::WebExtStorageStore as Store;

use crate::error::{self, Error};

/// Options for an extension storage area.
pub struct LazyStoreConfig {
    /// The path to the database file for this storage area.
    pub path: PathBuf,
    /// The path to the old kinto database. If it exists, we should attempt to
    /// migrate from this database as soon as we open our DB. It's not Option<>
    /// because the caller will not have checked whether it exists or not, so
    /// will assume it might.
    pub kinto_path: PathBuf,
}

/// A lazy store is automatically initialized on a background thread with its
/// configuration the first time it's used.
#[derive(Default)]
pub struct LazyStore {
    store: OnceCell<InterruptStore>,
    config: OnceCell<LazyStoreConfig>,
}

/// An `InterruptStore` wraps an inner extension store, and its interrupt
/// handle.
struct InterruptStore {
    inner: Store,
    handle: Arc<SqlInterruptHandle>,
}

impl LazyStore {
    /// Configures the lazy store. Returns an error if the store has already
    /// been configured. This method should be called from the main thread.
    pub fn configure(&self, config: LazyStoreConfig) -> error::Result<()> {
        self.config
            .set(config)
            .map_err(|_| Error::AlreadyConfigured)
    }

    /// Interrupts all pending operations on the store. If a database statement
    /// is currently running, this will interrupt that statement. If the
    /// statement is a write inside an active transaction, the entire
    /// transaction will be rolled back. This method should be called from the
    /// main thread.
    pub fn interrupt(&self) {
        if let Some(outer) = self.store.get() {
            outer.handle.interrupt();
        }
    }

    /// Returns the underlying store, initializing it if needed. This method
    /// should only be called from a background thread or task queue, since
    /// opening the database does I/O.
    pub fn get(&self) -> error::Result<&Store> {
        Ok(&self
            .store
            .get_or_try_init(|| match self.config.get() {
                Some(config) => {
                    let store = init_store(config)?;
                    let handle = store.interrupt_handle();
                    Ok(InterruptStore {
                        inner: store,
                        handle,
                    })
                }
                None => Err(Error::NotConfigured),
            })?
            .inner)
    }

    /// Tears down the store. If the store wasn't initialized, this is a no-op.
    /// This should only be called from a background thread or task queue,
    /// because closing the database also does I/O.
    pub fn teardown(self) -> error::Result<()> {
        if let Some(store) = self.store.into_inner() {
            store.inner.close()?;
        }
        Ok(())
    }
}

// Initialize the store, performing a migration if necessary.
// The requirements for migration are, roughly:
// * If kinto_path doesn't exist, we don't try to migrate.
// * If our DB path exists, we assume we've already migrated and don't try again.
// * If the migration fails, we close our store and delete the DB, then return
//   a special error code which tells our caller about the failure. It's then
//   expected to fall back to the "old" kinto store and we'll try again next time.
// Note that the migrate() method on the store is written such that it should
// ignore all "read" errors from the source, but propagate "write" errors on our
// DB - the intention is that things like corrupted source databases never fail,
// but disk-space failures on our database do.
fn init_store(config: &LazyStoreConfig) -> error::Result<Store> {
    let should_migrate = config.kinto_path.exists() && !config.path.exists();
    let store = Store::new(&config.path)?;
    if should_migrate {
        match store.migrate(&config.kinto_path) {
            // It's likely to be too early for us to stick the MigrationInfo
            // into the sync telemetry; a separate call to `take_migration_info`
            // must be made to the store (this is done by telemetry after it's
            // ready to submit the data).
            Ok(()) => {
                // need logging, but for now let's print to stdout.
                println!("extension-storage: migration complete");
                Ok(store)
            }
            Err(e) => {
                println!("extension-storage: migration failure: {e}");
                if let Err(e) = store.close() {
                    // welp, this probably isn't going to end well...
                    println!(
                        "extension-storage: failed to close the store after migration failure: {e}"
                    );
                }
                if let Err(e) = remove_file(&config.path) {
                    // This is bad - if it happens regularly it will defeat
                    // our entire migration strategy - we'll assume migration
                    // worked even though it didn't.
                    // So it's desirable to make noise if this happens.
                    println!("Failed to remove file after failed migration: {e}");
                }
                Err(Error::MigrationFailed(e))
            }
        }
    } else {
        Ok(store)
    }
}
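`LazyStore` hangs together on two `OnceCell`s: `configure` can only succeed once (`set` fails if a value is already present), and `get_or_try_init` opens the database exactly once, on first use, leaving the cell empty on error so a later call can retry. A compact sketch of that init-once shape (using the `once_cell` crate; a `String` stands in for the real store):

```rust
use once_cell::sync::OnceCell;

#[derive(Default)]
struct Lazy {
    config: OnceCell<String>, // set once, up front
    store: OnceCell<String>,  // opened once, on first use
}

impl Lazy {
    fn configure(&self, path: String) -> Result<(), &'static str> {
        // `set` errors if a value is already present.
        self.config.set(path).map_err(|_| "already configured")
    }

    fn get(&self) -> Result<&String, &'static str> {
        self.store.get_or_try_init(|| match self.config.get() {
            // A failed init leaves the cell empty, so a later call retries.
            Some(path) => Ok(format!("store at {path}")),
            None => Err("not configured"),
        })
    }
}

fn main() {
    let lazy = Lazy::default();
    assert_eq!(lazy.get(), Err("not configured"));
    lazy.configure("/tmp/storage.sqlite".into()).unwrap();
    assert!(lazy.configure("/elsewhere".into()).is_err());
    assert_eq!(lazy.get().unwrap(), "store at /tmp/storage.sqlite");
}
```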
@@ -4,6 +4,8 @@

/* import-globals-from head.js */

const STORAGE_SYNC_PREF = "webextensions.storage.sync.enabled";

// Test implementations and utility functions that are used against multiple
// storage areas (eg, a test which is run against browser.storage.local and
// browser.storage.sync, or a test against browser.storage.sync but needs to
@@ -74,6 +76,49 @@ async function checkGetImpl(areaName, prop, value) {
  );
}

function test_config_flag_needed() {
  async function testFn() {
    function background() {
      let promises = [];
      let apiTests = [
        { method: "get", args: ["foo"] },
        { method: "set", args: [{ foo: "bar" }] },
        { method: "remove", args: ["foo"] },
        { method: "clear", args: [] },
      ];
      apiTests.forEach(testDef => {
        promises.push(
          browser.test.assertRejects(
            browser.storage.sync[testDef.method](...testDef.args),
            "Please set webextensions.storage.sync.enabled to true in about:config",
            `storage.sync.${testDef.method} is behind a flag`
          )
        );
      });

      Promise.all(promises).then(() => browser.test.notifyPass("flag needed"));
    }

    ok(
      !Services.prefs.getBoolPref(STORAGE_SYNC_PREF, false),
      `The \`${STORAGE_SYNC_PREF}\` should be set to false`
    );

    let extension = ExtensionTestUtils.loadExtension({
      manifest: {
        permissions: ["storage"],
      },
      background,
    });

    await extension.startup();
    await extension.awaitFinish("flag needed");
    await extension.unload();
  }

  return runWithPrefs([[STORAGE_SYNC_PREF, false]], testFn);
}

async function test_storage_after_reload(areaName, { expectPersistency }) {
  // Just some random extension ID that we can re-use
  const extensionId = "my-extension-id@1";
@@ -124,8 +169,15 @@ async function test_storage_after_reload(areaName, { expectPersistency }) {
  await extension2.unload();
}

async function test_sync_reloading_extensions_works() {
  await test_storage_after_reload("sync", { expectPersistency: true });
function test_sync_reloading_extensions_works() {
  return runWithPrefs([[STORAGE_SYNC_PREF, true]], async () => {
    ok(
      Services.prefs.getBoolPref(STORAGE_SYNC_PREF, false),
      `The \`${STORAGE_SYNC_PREF}\` should be set to true`
    );

    await test_storage_after_reload("sync", { expectPersistency: true });
  });
}

async function test_background_page_storage(testAreaName) {
@@ -649,7 +701,7 @@ async function test_background_page_storage(testAreaName) {
  await extension.unload();
}

async function test_storage_sync_requires_real_id() {
function test_storage_sync_requires_real_id() {
  async function testFn() {
    async function background() {
      const EXCEPTION_MESSAGE =
@@ -680,7 +732,7 @@ async function test_storage_sync_requires_real_id() {
    await extension.unload();
  }

  return await testFn();
  return runWithPrefs([[STORAGE_SYNC_PREF, true]], testFn);
}

// Test for storage areas which don't support getBytesInUse() nor QUOTA
@@ -4,6 +4,8 @@

"use strict";

/* exported withSyncContext */

const { ExtensionCommon } = ChromeUtils.importESModule(
  "resource://gre/modules/ExtensionCommon.sys.mjs"
);
@@ -42,3 +44,23 @@ async function withContext(f) {
    await context.unload();
  }
}

/**
 * Like withContext(), but also turns on the "storage.sync" pref for
 * the duration of the function.
 * Calls to this function can be replaced with calls to withContext
 * once the pref is on by default.
 *
 * @param {Function} f the function to call
 */
async function withSyncContext(f) {
  const STORAGE_SYNC_PREF = "webextensions.storage.sync.enabled";
  let prefs = Services.prefs;

  try {
    prefs.setBoolPref(STORAGE_SYNC_PREF, true);
    await withContext(f);
  } finally {
    prefs.clearUserPref(STORAGE_SYNC_PREF);
  }
}
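`withSyncContext` is the classic set-then-restore guard: flip the pref, run the function, and put the old value back in `finally` even if the function throws. In Rust the same shape is usually an RAII guard; a stdlib-only sketch (the `Prefs` type is a stand-in for the preferences service):

```rust
use std::cell::Cell;

struct Prefs {
    sync_enabled: Cell<bool>,
}

struct PrefGuard<'a> {
    prefs: &'a Prefs,
    previous: bool,
}

impl Drop for PrefGuard<'_> {
    // Runs on scope exit, including early returns and panics: the moral
    // equivalent of the JS `finally` block.
    fn drop(&mut self) {
        self.prefs.sync_enabled.set(self.previous);
    }
}

fn with_sync_enabled<R>(prefs: &Prefs, f: impl FnOnce() -> R) -> R {
    let _guard = PrefGuard {
        prefs,
        previous: prefs.sync_enabled.replace(true),
    };
    f()
}

fn main() {
    let prefs = Prefs {
        sync_enabled: Cell::new(false),
    };
    with_sync_enabled(&prefs, || assert!(prefs.sync_enabled.get()));
    assert!(!prefs.sync_enabled.get()); // restored afterwards
}
```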
@@ -0,0 +1,86 @@
/* -*- Mode: indent-tabs-mode: nil; js-indent-level: 2 -*- */
/* vim: set sts=2 sw=2 et tw=80: */
"use strict";

// Import the rust-based and kinto-based implementations
const { extensionStorageSync: rustImpl } = ChromeUtils.importESModule(
  "resource://gre/modules/ExtensionStorageSync.sys.mjs"
);
const { extensionStorageSyncKinto: kintoImpl } = ChromeUtils.importESModule(
  "resource://gre/modules/ExtensionStorageSyncKinto.sys.mjs"
);

Services.prefs.setBoolPref("webextensions.storage.sync.kinto", false);

add_task(async function test_sync_migration() {
  // There's no good reason to perform this test via test extensions - we just
  // call the underlying APIs directly.

  // Set some stuff using the kinto-based impl.
  let e1 = { id: "test@mozilla.com" };
  let c1 = { extension: e1, callOnClose() {} };
  await kintoImpl.set(e1, { foo: "bar" }, c1);

  let e2 = { id: "test-2@mozilla.com" };
  let c2 = { extension: e2, callOnClose() {} };
  await kintoImpl.set(e2, { second: "2nd" }, c2);

  let e3 = { id: "test-3@mozilla.com" };
  let c3 = { extension: e3, callOnClose() {} };

  // And all the data should be magically migrated.
  Assert.deepEqual(await rustImpl.get(e1, "foo", c1), { foo: "bar" });
  Assert.deepEqual(await rustImpl.get(e2, null, c2), { second: "2nd" });

  // Sanity check we really are doing what we think we are - set a value in our
  // new one, it should not be reflected by kinto.
  await rustImpl.set(e3, { third: "3rd" }, c3);
  Assert.deepEqual(await rustImpl.get(e3, null, c3), { third: "3rd" });
  Assert.deepEqual(await kintoImpl.get(e3, null, c3), {});
  // cleanup.
  await kintoImpl.clear(e1, c1);
  await kintoImpl.clear(e2, c2);
  await kintoImpl.clear(e3, c3);
  await rustImpl.clear(e1, c1);
  await rustImpl.clear(e2, c2);
  await rustImpl.clear(e3, c3);
});

// It would be great to have failure tests, but that seems impossible to have
// in automated tests given the conditions under which we migrate - it would
// basically require us to arrange for zero free disk space or to somehow
// arrange for sqlite to see an io error. Specially crafted "corrupt"
// sqlite files don't help because that file must not exist for us to even
// attempt migration.
//
// But - what we can test is that if .migrationOk on the new impl ever goes to
// false we delegate correctly.
add_task(async function test_sync_migration_delgates() {
  let e1 = { id: "test@mozilla.com" };
  let c1 = { extension: e1, callOnClose() {} };
  await kintoImpl.set(e1, { foo: "bar" }, c1);

  // We think migration went OK - `get` shouldn't see kinto.
  Assert.deepEqual(await rustImpl.get(e1, null, c1), {});

  info(
    "Setting migration failure flag to ensure we delegate to kinto implementation"
  );
  rustImpl.migrationOk = false;
  // get should now be seeing kinto.
  Assert.deepEqual(await rustImpl.get(e1, null, c1), { foo: "bar" });
  // check everything else delegates.

  await rustImpl.set(e1, { foo: "foo" }, c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), { foo: "foo" });

  Assert.equal(await rustImpl.getBytesInUse(e1, null, c1), 8);

  await rustImpl.remove(e1, "foo", c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), {});

  await rustImpl.set(e1, { foo: "foo" }, c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), { foo: "foo" });
  await rustImpl.clear(e1, c1);
  Assert.deepEqual(await kintoImpl.get(e1, null, c1), {});
});
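The delegation this second test exercises is a plain fallback switch: while the migration flag is true the Rust store answers, and once it flips, every call routes to the kinto implementation. A schematic of that dispatch in Rust (trait objects standing in for the two JS implementations; this mirrors the idea, not the actual module's structure):

```rust
trait Storage {
    fn get(&self, key: &str) -> Option<String>;
}

struct RustStore;
struct KintoStore;

impl Storage for RustStore {
    fn get(&self, _key: &str) -> Option<String> {
        Some("from rust".into())
    }
}

impl Storage for KintoStore {
    fn get(&self, _key: &str) -> Option<String> {
        Some("from kinto".into())
    }
}

struct Bridge {
    migration_ok: bool,
    rust: RustStore,
    kinto: KintoStore,
}

impl Bridge {
    fn backend(&self) -> &dyn Storage {
        // One check gates every operation, mirroring `migrationOk`.
        if self.migration_ok {
            &self.rust
        } else {
            &self.kinto
        }
    }
}

fn main() {
    let mut bridge = Bridge {
        migration_ok: true,
        rust: RustStore,
        kinto: KintoStore,
    };
    assert_eq!(bridge.backend().get("foo").unwrap(), "from rust");
    bridge.migration_ok = false; // simulated migration failure
    assert_eq!(bridge.backend().get("foo").unwrap(), "from kinto");
}
```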
@@ -3,16 +3,40 @@

"use strict";

ChromeUtils.defineESModuleGetters(this, {
  extensionStorageSync: "resource://gre/modules/ExtensionStorageSync.sys.mjs",
  Service: "resource://services-sync/service.sys.mjs",
  QuotaError: "resource://gre/modules/RustWebextstorage.sys.mjs",
});
const NS_ERROR_DOM_QUOTA_EXCEEDED_ERR = 0x80530016;

const { ExtensionStorageEngineBridge } = ChromeUtils.importESModule(
  "resource://services-sync/engines/extension-storage.sys.mjs"
XPCOMUtils.defineLazyServiceGetter(
  this,
  "StorageSyncService",
  "@mozilla.org/extensions/storage/sync;1",
  "nsIInterfaceRequestor"
);
const SYNC_QUOTA_BYTES = 102400;

function promisify(func, ...params) {
  return new Promise((resolve, reject) => {
    let changes = [];
    func(...params, {
      QueryInterface: ChromeUtils.generateQI([
        "mozIExtensionStorageListener",
        "mozIExtensionStorageCallback",
        "mozIBridgedSyncEngineCallback",
        "mozIBridgedSyncEngineApplyCallback",
      ]),
      onChanged(extId, json) {
        changes.push({ extId, changes: JSON.parse(json) });
      },
      handleSuccess(value) {
        resolve({
          changes,
          value: typeof value == "string" ? JSON.parse(value) : value,
        });
      },
      handleError(code, message) {
        reject(Components.Exception(message, code));
      },
    });
  });
}

add_task(async function setup_storage_sync() {
  // So that we can write to the profile directory.
@@ -20,95 +44,92 @@ add_task(async function setup_storage_sync() {
});

add_task(async function test_storage_sync_service() {
  const service = extensionStorageSync;
  const service = StorageSyncService.getInterface(Ci.mozIExtensionStorageArea);
  {
    // mocking notifyListeners so we have access to the return value of `service.set`
    service.notifyListeners = (extId, changeSet) => {
      equal(extId, "ext-1");
      let expected = {
        hi: {
          newValue: "hello! 💖",
    let { changes, value } = await promisify(
      service.set,
      "ext-1",
      JSON.stringify({
        hi: "hello! 💖",
        bye: "adiós",
      })
    );
    deepEqual(
      changes,
      [
        {
          extId: "ext-1",
          changes: {
            hi: {
              newValue: "hello! 💖",
            },
            bye: {
              newValue: "adiós",
            },
          },
        },
        bye: {
          newValue: "adiós",
        },
      };

      deepEqual(
        [changeSet],
        [expected],
        "`set` should notify listeners about changes"
      );
    };

    let newValue = {
      hi: "hello! 💖",
      bye: "adiós",
    };

    // finally calling `service.set` which asserts the deepEqual in the above mocked `notifyListeners`
    await service.set({ id: "ext-1" }, newValue);
      ],
      "`set` should notify listeners about changes"
    );
    ok(!value, "`set` should not return a value");
  }

  {
    service.notifyListeners = (_extId, _changeSet) => {
      console.log(`NOTIFY LISTENERS`);
    };
    let { changes, value } = await promisify(
      service.get,
      "ext-1",
      JSON.stringify(["hi"])
    );
    deepEqual(changes, [], "`get` should not notify listeners");
    deepEqual(
      value,
      {
        hi: "hello! 💖",
      },
      "`get` with key should return value"
    );

    let expected = {
      hi: "hello! 💖",
    };

    let value = await service.get({ id: "ext-1" }, ["hi"]);
    deepEqual(value, expected, "`get` with key should return value");

    let expected2 = {
      hi: "hello! 💖",
      bye: "adiós",
    };

    let allValues = await service.get({ id: "ext-1" }, null);
    let { value: allValues } = await promisify(service.get, "ext-1", "null");
    deepEqual(
      allValues,
      expected2,
      {
        hi: "hello! 💖",
        bye: "adiós",
      },
      "`get` without a key should return all values"
    );
  }

  {
    service.notifyListeners = (extId, changeSet) => {
      console.log("notifyListeners", extId, changeSet);
    };

    let newValue = {
      hi: "hola! 👋",
    };

    await service.set({ id: "ext-2" }, newValue);
    await service.clear({ id: "ext-1" });
    let allValues = await service.get({ id: "ext-1" }, null);
    await promisify(
      service.set,
      "ext-2",
      JSON.stringify({
        hi: "hola! 👋",
      })
    );
    await promisify(service.clear, "ext-1");
    let { value: allValues } = await promisify(service.get, "ext-1", "null");
    deepEqual(allValues, {}, "clear removed ext-1");

    let allValues2 = await service.get({ id: "ext-2" }, null);
    let expected = { hi: "hola! 👋" };
    deepEqual(allValues2, expected, "clear didn't remove ext-2");
    let { value: allValues2 } = await promisify(service.get, "ext-2", "null");
    deepEqual(allValues2, { hi: "hola! 👋" }, "clear didn't remove ext-2");
    // We need to clear data for ext-2 too, so later tests don't fail due to
    // this data.
    await service.clear({ id: "ext-2" });
    await promisify(service.clear, "ext-2");
  }
});
add_task(async function test_storage_sync_bridged_engine() {
  let engine = new ExtensionStorageEngineBridge(Service);
  await engine.initialize();
  let area = engine._rustStore;
  const area = StorageSyncService.getInterface(Ci.mozIExtensionStorageArea);
  const engine = StorageSyncService.getInterface(Ci.mozIBridgedSyncEngine);

  info("Add some local items");
  await area.set("ext-1", JSON.stringify({ a: "abc" }));
  await area.set("ext-2", JSON.stringify({ b: "xyz" }));
  await promisify(area.set, "ext-1", JSON.stringify({ a: "abc" }));
  await promisify(area.set, "ext-2", JSON.stringify({ b: "xyz" }));

  info("Start a sync");
  await engine._bridge.syncStarted();
  await promisify(engine.syncStarted);

  info("Store some incoming synced items");
  let incomingEnvelopesAsJSON = [
@@ -133,24 +154,20 @@ add_task(async function test_storage_sync_bridged_engine() {
      }),
    },
  ].map(e => JSON.stringify(e));

  await engine._bridge.storeIncoming(incomingEnvelopesAsJSON);
  await promisify(area.storeIncoming, incomingEnvelopesAsJSON);

  info("Merge");
  // Three levels of JSON wrapping: each outgoing envelope, the cleartext in
  // each envelope, and the extension storage data in each cleartext payload.
  let outgoingEnvelopesAsJSON = await engine._bridge.apply();
  let { value: outgoingEnvelopesAsJSON } = await promisify(area.apply);
  let outgoingEnvelopes = outgoingEnvelopesAsJSON.map(json => JSON.parse(json));
  let parsedCleartexts = outgoingEnvelopes.map(e => JSON.parse(e.payload));
  let parsedData = parsedCleartexts.map(c => JSON.parse(c.data));

  let changes = (await area.getSyncedChanges()).map(change => {
    return {
      extId: change.extId,
      changes: JSON.parse(change.changes),
    };
  });

  let { changes } = await promisify(
    area.QueryInterface(Ci.mozISyncedExtensionStorageArea)
      .fetchPendingSyncChanges
  );
  deepEqual(
    changes,
    [
@@ -199,15 +216,15 @@ add_task(async function test_storage_sync_bridged_engine() {
  );

  info("Mark all extensions as uploaded");
  await engine._bridge.setUploaded(0, [ext1Guid, "guidAAA"]);
  await promisify(engine.setUploaded, 0, [ext1Guid, "guidAAA"]);

  info("Finish sync");
  await engine._bridge.syncFinished();
  await promisify(engine.syncFinished);

  // Try fetching values for the remote-only extension we just synced.
  let ext3Value = await area.get("ext-3", "null");
  let { value: ext3Value } = await promisify(area.get, "ext-3", "null");
  deepEqual(
    JSON.parse(ext3Value),
    ext3Value,
    {
      d: "new! ✨",
    },
@@ -215,47 +232,43 @@ add_task(async function test_storage_sync_bridged_engine() {
  );

  info("Try applying a second time");
  let secondApply = await engine._bridge.apply();
  deepEqual(secondApply, {}, "Shouldn't merge anything on second apply");
  let secondApply = await promisify(area.apply);
  deepEqual(secondApply.value, {}, "Shouldn't merge anything on second apply");

  info("Wipe all items");
  await engine._bridge.wipe();
  await promisify(engine.wipe);

  for (let extId of ["ext-1", "ext-2", "ext-3"]) {
    // `get` always returns an object, even if there are no keys for the
    // extension ID.
    let value = await area.get(extId, "null");
    deepEqual(
      JSON.parse(value),
      {},
      `Wipe should remove all values for ${extId}`
    );
    let { value } = await promisify(area.get, extId, "null");
    deepEqual(value, {}, `Wipe should remove all values for ${extId}`);
  }
});
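The "three levels of JSON wrapping" the test unpacks look like this: an envelope whose `payload` field is a JSON string, whose `data` field is again a JSON string. A small Rust sketch of the same triple parse (using `serde_json`; the field names follow the test, not any spec):

```rust
use serde_json::Value;

fn main() -> serde_json::Result<()> {
    // Level 1: the outgoing envelope itself.
    let envelope_json =
        r#"{"id":"ext-1","payload":"{\"id\":\"ext-1\",\"data\":\"{\\\"a\\\":\\\"abc\\\"}\"}"}"#;
    let envelope: Value = serde_json::from_str(envelope_json)?;

    // Level 2: the cleartext payload, itself a JSON string.
    let cleartext: Value = serde_json::from_str(envelope["payload"].as_str().unwrap())?;

    // Level 3: the extension storage data inside the cleartext.
    let data: Value = serde_json::from_str(cleartext["data"].as_str().unwrap())?;

    assert_eq!(data["a"], "abc");
    Ok(())
}
```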
add_task(async function test_storage_sync_quota() {
  let engine = new ExtensionStorageEngineBridge(Service);
  await engine.initialize();
  let service = engine._rustStore;
  const service = StorageSyncService.getInterface(Ci.mozIExtensionStorageArea);
  const engine = StorageSyncService.getInterface(Ci.mozIBridgedSyncEngine);
  await promisify(engine.wipe);
  await promisify(service.set, "ext-1", JSON.stringify({ x: "hi" }));
  await promisify(service.set, "ext-1", JSON.stringify({ longer: "value" }));

  await engine._bridge.wipe();
  await service.set("ext-1", JSON.stringify({ x: "hi" }));
  await service.set("ext-1", JSON.stringify({ longer: "value" }));

  let v1 = await service.getBytesInUse("ext-1", '"x"');
  let { value: v1 } = await promisify(service.getBytesInUse, "ext-1", '"x"');
  Assert.equal(v1, 5); // key len without quotes, value len with quotes.
  let v2 = await service.getBytesInUse("ext-1", "null");
  let { value: v2 } = await promisify(service.getBytesInUse, "ext-1", "null");
  // 5 from 'x', plus 'longer' (6 for key, 7 for value = 13) = 18.
  Assert.equal(v2, 18);

  // Now set something greater than our quota.
  await Assert.rejects(
    service.set(
    promisify(
      service.set,
      "ext-1",
      JSON.stringify({
        big: "x".repeat(SYNC_QUOTA_BYTES),
        big: "x".repeat(Ci.mozIExtensionStorageArea.SYNC_QUOTA_BYTES),
      })
    ),
    QuotaError,
    "should reject with QuotaError"
    ex => ex.result == NS_ERROR_DOM_QUOTA_EXCEEDED_ERR,
    "should reject with NS_ERROR_DOM_QUOTA_EXCEEDED_ERR"
  );
});
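The byte accounting asserted above follows a simple rule: each key counts its raw length, each value counts the length of its JSON serialization (hence the quotes). A sketch of that rule, reproducing the 5 and 18 from the test (the rule matches the test's comments; the helper below is hypothetical, not the component's actual code):

```rust
use std::collections::BTreeMap;

/// Bytes in use: key length plus the length of the JSON-encoded value.
fn bytes_in_use(items: &BTreeMap<&str, &str>) -> usize {
    items
        .iter()
        .map(|(key, value)| key.len() + format!("{value:?}").len())
        .sum()
}

fn main() {
    let mut items = BTreeMap::new();
    items.insert("x", "hi");
    assert_eq!(bytes_in_use(&items), 1 + 4); // "x" + "\"hi\"" = 5

    items.insert("longer", "value");
    // 5 from `x`, plus `longer` (6 for the key, 7 for "\"value\"") = 18.
    assert_eq!(bytes_in_use(&items), 18);
}
```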
@@ -19,9 +19,13 @@ add_task(async function setup() {
});

add_task(async function test_contentscript_storage_sync() {
  await test_contentscript_storage("sync");
  return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
    test_contentscript_storage("sync")
  );
});

add_task(async function test_contentscript_bytes_in_use_sync() {
  await test_contentscript_storage_area_with_bytes_in_use("sync", true);
  return runWithPrefs([[STORAGE_SYNC_PREF, true]], () =>
    test_contentscript_storage_area_with_bytes_in_use("sync", true)
  );
});