Bug 1878149 - Add Filelist structured cloning support to IDB. r=dom-storage-reviewers,janv,asuth
Differential Revision: https://phabricator.services.mozilla.com/D222910
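For orientation, a minimal usage sketch of what this patch enables from page script (standard web APIs only; the database and store names below are made up for the example, not part of the patch): a FileList can now be stored as an IndexedDB value and read back as a live FileList.

const files = document.querySelector("input[type=file]").files;  // a FileList

const openReq = indexedDB.open("example-db", 1);
openReq.onupgradeneeded = () => openReq.result.createObjectStore("store");
openReq.onsuccess = () => {
  const db = openReq.result;

  const tx = db.transaction("store", "readwrite");
  tx.objectStore("store").put(files, "key");
  tx.oncomplete = () => {
    const getReq = db.transaction("store").objectStore("store").get("key");
    getReq.onsuccess = () => {
      const clone = getReq.result;  // a FileList again, not a plain Array
      console.log(clone instanceof FileList, clone.length, clone.item(0)?.name);
    };
  };
};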
@@ -37,6 +37,8 @@
#include "mozilla/dom/BlobBinding.h"
#include "mozilla/dom/Document.h"
#include "mozilla/dom/File.h"
#include "mozilla/dom/FileList.h"
#include "mozilla/dom/FileListBinding.h"
#include "mozilla/dom/IDBObjectStoreBinding.h"
#include "mozilla/dom/MemoryBlobImpl.h"
#include "mozilla/dom/StreamBlobImpl.h"

@@ -46,6 +48,7 @@
#include "mozilla/ipc/BackgroundChild.h"
#include "mozilla/ipc/PBackgroundSharedTypes.h"
#include "nsCOMPtr.h"
#include "nsIGlobalObject.h"
#include "nsStreamUtils.h"
#include "nsStringStream.h"

@@ -144,6 +147,71 @@ MovingNotNull<RefPtr<IDBRequest>> GenerateRequest(
      std::move(transaction));
}

bool WriteBlob(JSContext* aCx, JSStructuredCloneWriter* aWriter,
               Blob* const aBlob,
               IDBObjectStore::StructuredCloneWriteInfo* aCloneWriteInfo) {
  MOZ_ASSERT(aCx);
  MOZ_ASSERT(aWriter);
  MOZ_ASSERT(aBlob);
  MOZ_ASSERT(aCloneWriteInfo);

  ErrorResult rv;
  const uint64_t nativeEndianSize = aBlob->GetSize(rv);
  MOZ_ASSERT(!rv.Failed());

  const uint64_t size = NativeEndian::swapToLittleEndian(nativeEndianSize);

  nsString type;
  aBlob->GetType(type);

  const NS_ConvertUTF16toUTF8 convType(type);
  const uint32_t convTypeLength =
      NativeEndian::swapToLittleEndian(convType.Length());

  if (aCloneWriteInfo->mFiles.Length() > size_t(UINT32_MAX)) {
    MOZ_ASSERT(false, "Fix the structured clone data to use a bigger type!");

    return false;
  }

  const uint32_t index = aCloneWriteInfo->mFiles.Length();

  if (!JS_WriteUint32Pair(
          aWriter, aBlob->IsFile() ? SCTAG_DOM_FILE : SCTAG_DOM_BLOB, index) ||
      !JS_WriteBytes(aWriter, &size, sizeof(size)) ||
      !JS_WriteBytes(aWriter, &convTypeLength, sizeof(convTypeLength)) ||
      !JS_WriteBytes(aWriter, convType.get(), convType.Length())) {
    return false;
  }

  const RefPtr<File> file = aBlob->ToFile();
  if (file) {
    ErrorResult rv;
    const int64_t nativeEndianLastModifiedDate = file->GetLastModified(rv);
    MOZ_ALWAYS_TRUE(!rv.Failed());

    const int64_t lastModifiedDate =
        NativeEndian::swapToLittleEndian(nativeEndianLastModifiedDate);

    nsString name;
    file->GetName(name);

    const NS_ConvertUTF16toUTF8 convName(name);
    const uint32_t convNameLength =
        NativeEndian::swapToLittleEndian(convName.Length());

    if (!JS_WriteBytes(aWriter, &lastModifiedDate, sizeof(lastModifiedDate)) ||
        !JS_WriteBytes(aWriter, &convNameLength, sizeof(convNameLength)) ||
        !JS_WriteBytes(aWriter, convName.get(), convName.Length())) {
      return false;
    }
  }

  aCloneWriteInfo->mFiles.EmplaceBack(StructuredCloneFileBase::eBlob, aBlob);

  return true;
}

bool StructuredCloneWriteCallback(JSContext* aCx,
                                  JSStructuredCloneWriter* aWriter,
                                  JS::Handle<JSObject*> aObj,

@@ -168,72 +236,68 @@ bool StructuredCloneWriteCallback(JSContext* aCx,
  JS::Rooted<JSObject*> obj(aCx, aObj);

  {
    Blob* blob = nullptr;
    if (NS_SUCCEEDED(UNWRAP_OBJECT(Blob, &obj, blob))) {
      ErrorResult rv;
      const uint64_t nativeEndianSize = blob->GetSize(rv);
      MOZ_ASSERT(!rv.Failed());
    FileList* fileList = nullptr;
    if (NS_SUCCEEDED(UNWRAP_OBJECT(FileList, &obj, fileList))) {
      const auto fileListStartIndex = cloneWriteInfo->mFiles.Length();
      const uint32_t fileListLength = fileList->Length();

      const uint64_t size = NativeEndian::swapToLittleEndian(nativeEndianSize);

      nsString type;
      blob->GetType(type);

      const NS_ConvertUTF16toUTF8 convType(type);
      const uint32_t convTypeLength =
          NativeEndian::swapToLittleEndian(convType.Length());

      if (cloneWriteInfo->mFiles.Length() > size_t(UINT32_MAX)) {
      if (size_t(fileListStartIndex) > size_t(UINT32_MAX) - fileListLength) {
        MOZ_ASSERT(false,
                   "Fix the structured clone data to use a bigger type!");
        return false;
      }

      const uint32_t index = cloneWriteInfo->mFiles.Length();

      if (!JS_WriteUint32Pair(aWriter,
                              blob->IsFile() ? SCTAG_DOM_FILE : SCTAG_DOM_BLOB,
                              index) ||
          !JS_WriteBytes(aWriter, &size, sizeof(size)) ||
          !JS_WriteBytes(aWriter, &convTypeLength, sizeof(convTypeLength)) ||
          !JS_WriteBytes(aWriter, convType.get(), convType.Length())) {
      if (!JS_WriteUint32Pair(aWriter, SCTAG_DOM_FILELIST, fileListLength)) {
        return false;
      }

      const RefPtr<File> file = blob->ToFile();
      if (file) {
        ErrorResult rv;
        const int64_t nativeEndianLastModifiedDate = file->GetLastModified(rv);
        MOZ_ALWAYS_TRUE(!rv.Failed());

        const int64_t lastModifiedDate =
            NativeEndian::swapToLittleEndian(nativeEndianLastModifiedDate);

        nsString name;
        file->GetName(name);

        const NS_ConvertUTF16toUTF8 convName(name);
        const uint32_t convNameLength =
            NativeEndian::swapToLittleEndian(convName.Length());

        if (!JS_WriteBytes(aWriter, &lastModifiedDate,
                           sizeof(lastModifiedDate)) ||
            !JS_WriteBytes(aWriter, &convNameLength, sizeof(convNameLength)) ||
            !JS_WriteBytes(aWriter, convName.get(), convName.Length())) {
          return false;
      for (uint32_t i = 0; i < fileListLength; ++i) {
        File* file = fileList->Item(i);
        if (!WriteBlob(aCx, aWriter, file, cloneWriteInfo)) {
          return false;  // Everything should fail
        }
      }

      cloneWriteInfo->mFiles.EmplaceBack(StructuredCloneFileBase::eBlob, blob);

      return true;
    }
  }

  {
    Blob* blob = nullptr;
    if (NS_SUCCEEDED(UNWRAP_OBJECT(Blob, &obj, blob))) {
      return WriteBlob(aCx, aWriter, blob, cloneWriteInfo);
    }
  }

  return StructuredCloneHolder::WriteFullySerializableObjects(aCx, aWriter,
                                                              aObj);
}
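Read together with WriteBlob above, the clone-stream layout for a FileList value can be summarized as follows. This is an illustrative reconstruction from the JS_WriteUint32Pair/JS_WriteBytes calls in this hunk, not code from the patch; the two read callbacks later in the patch consume exactly this shape, a (SCTAG_DOM_FILELIST, length) pair followed by length per-file records.

// Illustrative reconstruction (not patch code) of the clone stream layout for
// a FileList value, as emitted by StructuredCloneWriteCallback + WriteBlob:
//
//   uint32 pair : (SCTAG_DOM_FILELIST, fileListLength)
//   then, for each member, one WriteBlob record:
//     uint32 pair : (SCTAG_DOM_FILE or SCTAG_DOM_BLOB, index into mFiles)
//     uint64 LE   : blob size
//     uint32 LE   : byte length of the UTF-8 MIME type
//     bytes       : MIME type (UTF-8)
//   and, for File objects only:
//     int64 LE    : last-modified date
//     uint32 LE   : byte length of the UTF-8 file name
//     bytes       : file name (UTF-8)
//
// The file contents themselves never enter the clone stream; each record only
// carries an index into cloneWriteInfo->mFiles, where the Blob is retained.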

bool CopyingWriteBlob(JSContext* aCx, JSStructuredCloneWriter* aWriter,
                      Blob* const aBlob,
                      IDBObjectStore::StructuredCloneInfo* aCloneInfo) {
  MOZ_ASSERT(aCx);
  MOZ_ASSERT(aWriter);
  MOZ_ASSERT(aBlob);
  MOZ_ASSERT(aCloneInfo);

  if (aCloneInfo->mFiles.Length() > size_t(UINT32_MAX)) {
    MOZ_ASSERT(false, "Fix the structured clone data to use a bigger type!");
    return false;
  }

  const uint32_t index = aCloneInfo->mFiles.Length();

  if (!JS_WriteUint32Pair(
          aWriter, aBlob->IsFile() ? SCTAG_DOM_FILE : SCTAG_DOM_BLOB, index)) {
    return false;
  }

  aCloneInfo->mFiles.EmplaceBack(StructuredCloneFileBase::eBlob, aBlob);

  return true;
}

bool CopyingStructuredCloneWriteCallback(JSContext* aCx,
                                         JSStructuredCloneWriter* aWriter,
                                         JS::Handle<JSObject*> aObj,

@@ -250,28 +314,39 @@ bool CopyingStructuredCloneWriteCallback(JSContext* aCx,
  JS::Rooted<JSObject*> obj(aCx, aObj);

  {
    Blob* blob = nullptr;
    if (NS_SUCCEEDED(UNWRAP_OBJECT(Blob, &obj, blob))) {
      if (cloneInfo->mFiles.Length() > size_t(UINT32_MAX)) {
    FileList* fileList = nullptr;
    if (NS_SUCCEEDED(UNWRAP_OBJECT(FileList, &obj, fileList))) {
      const auto fileListStartIndex = cloneInfo->mFiles.Length();
      const uint32_t fileListLength = fileList->Length();

      if (size_t(fileListStartIndex) > size_t(UINT32_MAX) - fileListLength) {
        MOZ_ASSERT(false,
                   "Fix the structured clone data to use a bigger type!");
        return false;
      }

      const uint32_t index = cloneInfo->mFiles.Length();

      if (!JS_WriteUint32Pair(aWriter,
                              blob->IsFile() ? SCTAG_DOM_FILE : SCTAG_DOM_BLOB,
                              index)) {
      if (!JS_WriteUint32Pair(aWriter, SCTAG_DOM_FILELIST, fileListLength)) {
        return false;
      }

      cloneInfo->mFiles.EmplaceBack(StructuredCloneFileBase::eBlob, blob);
      for (uint32_t i = 0; i < fileList->Length(); ++i) {
        File* file = fileList->Item(i);
        if (!CopyingWriteBlob(aCx, aWriter, file, cloneInfo)) {
          return false;  // Everything should fail
        }
      }

      return true;
    }
  }

  {
    Blob* blob = nullptr;
    if (NS_SUCCEEDED(UNWRAP_OBJECT(Blob, &obj, blob))) {
      return CopyingWriteBlob(aCx, aWriter, blob, cloneInfo);
    }
  }

  return StructuredCloneHolder::WriteFullySerializableObjects(aCx, aWriter,
                                                              aObj);
}

@@ -325,6 +400,82 @@ JSObject* CopyingStructuredCloneReadCallback(
    void* aClosure) {
  MOZ_ASSERT(aTag != SCTAG_DOM_FILE_WITHOUT_LASTMODIFIEDDATE);

  if (aTag == SCTAG_DOM_FILELIST) {
    auto* const cloneInfo =
        static_cast<IDBObjectStore::StructuredCloneInfo*>(aClosure);

    // For empty filelist, aData is not used but must remain within bounds.
    const auto& files = cloneInfo->mFiles;
    const uint32_t fileListLength = aData;

    if (fileListLength > files.Length()) {
      MOZ_ASSERT(false, "Bad file list length value!");

      return nullptr;
    }

    // We need to ensure that all RAII smart pointers which may trigger GC are
    // destroyed on return prior to this JS::Rooted being destroyed and
    // unrooting the pointer. This scope helps make this intent more explicit.
    JS::Rooted<JSObject*> obj(aCx);
    {
      nsCOMPtr<nsIGlobalObject> global = xpc::CurrentNativeGlobal(aCx);
      if (!global) {
        MOZ_ASSERT(false, "Could not access global!");

        return nullptr;
      }

      RefPtr<FileList> fileList = new FileList(global);

      for (uint32_t i = 0u; i < fileListLength; ++i) {
        uint32_t tag = UINT32_MAX;
        uint32_t index = UINT32_MAX;
        if (!JS_ReadUint32Pair(aReader, &tag, &index)) {
          return nullptr;
        }

        const bool hasFileTag = tag == SCTAG_DOM_FILE;
        if (!hasFileTag) {
          MOZ_ASSERT(false, "Unexpected tag!");

          return nullptr;
        }

        if (uint64_t(index) >= cloneInfo->mFiles.Length()) {
          MOZ_ASSERT(false, "Bad index!");

          return nullptr;
        }

        StructuredCloneFileChild& fileChild = cloneInfo->mFiles[index];
        MOZ_ASSERT(fileChild.Type() == StructuredCloneFileBase::eBlob);

        RefPtr<Blob> blob = fileChild.BlobPtr();
        MOZ_ASSERT(blob->IsFile());

        RefPtr<File> file = blob->ToFile();
        if (!file) {
          MOZ_ASSERT(false, "Could not convert blob to file!");

          return nullptr;
        }

        if (!fileList->Append(file)) {
          MOZ_ASSERT(false, "Could not extend filelist!");

          return nullptr;
        }
      }

      if (!WrapAsJSObject(aCx, fileList, &obj)) {
        return nullptr;
      }
    }

    return obj;
  }

  if (aTag == SCTAG_DOM_BLOB || aTag == SCTAG_DOM_FILE ||
      aTag == SCTAG_DOM_MUTABLEFILE) {
    auto* const cloneInfo =

@@ -783,7 +934,7 @@ RefPtr<IDBRequest> IDBObjectStore::AddOrPut(JSContext* aCx,
  commonParams.key() = key;
  commonParams.indexUpdateInfos() = std::move(updateInfos);

  // Convert any blobs or mutable files into FileAddInfo.
  // Convert any blobs or mutable files into FileAddInfos.
  QM_TRY_UNWRAP(
      commonParams.fileAddInfos(),
      TransformIntoNewArrayAbortOnErr(

@@ -10,8 +10,10 @@
#include "IDBDatabase.h"

#include "mozilla/dom/FileBlobImpl.h"
#include "mozilla/dom/FileList.h"
#include "mozilla/dom/StructuredCloneTags.h"
#include "mozilla/dom/URLSearchParams.h"
#include "mozilla/dom/WorkerPrivate.h"
#include "mozilla/dom/WorkerScope.h"
#include "MainThreadUtils.h"
#include "jsapi.h"

@@ -200,6 +202,31 @@ class ValueDeserializationHelperBase {
    return true;
  }

  template <typename StructuredCloneFile>
  static already_AddRefed<File> CreateUnwrappedFile(
      JSContext* aCx, IDBDatabase* aDatabase, const StructuredCloneFile& aFile,
      const BlobOrFileData& aData) {
    MOZ_ASSERT(aCx);
    MOZ_ASSERT(aData.tag == SCTAG_DOM_FILE ||
               aData.tag == SCTAG_DOM_FILE_WITHOUT_LASTMODIFIEDDATE);
    MOZ_ASSERT(aFile.Type() == StructuredCloneFileBase::eBlob);

    const auto blob = ValueDeserializationHelper<StructuredCloneFile>::GetBlob(
        aCx, aDatabase, aFile);
    if (NS_WARN_IF(!blob)) {
      return nullptr;
    }

    blob->Impl()->SetLazyData(aData.name, aData.type, aData.size,
                              aData.lastModifiedDate * PR_USEC_PER_MSEC);

    MOZ_ASSERT(blob->IsFile());
    RefPtr<File> file = blob->ToFile();
    MOZ_ASSERT(file);

    return file.forget();
  }

  template <typename StructuredCloneFile>
  static bool CreateAndWrapBlobOrFile(JSContext* aCx, IDBDatabase* aDatabase,
                                      const StructuredCloneFile& aFile,

@@ -211,45 +238,42 @@ class ValueDeserializationHelperBase {
               aData.tag == SCTAG_DOM_BLOB);
    MOZ_ASSERT(aFile.Type() == StructuredCloneFileBase::eBlob);

    if (aData.tag == SCTAG_DOM_FILE ||
        aData.tag == SCTAG_DOM_FILE_WITHOUT_LASTMODIFIEDDATE) {
      RefPtr<File> file =
          ValueDeserializationHelper<StructuredCloneFile>::CreateUnwrappedFile(
              aCx, aDatabase, aFile, aData);
      return WrapAsJSObject(aCx, file, aResult);
    }

    const auto blob = ValueDeserializationHelper<StructuredCloneFile>::GetBlob(
        aCx, aDatabase, aFile);
    if (NS_WARN_IF(!blob)) {
      return false;
    }

    if (aData.tag == SCTAG_DOM_BLOB) {
      blob->Impl()->SetLazyData(VoidString(), aData.type, aData.size,
                                INT64_MAX);
      MOZ_ASSERT(!blob->IsFile());
    MOZ_ASSERT(aData.tag == SCTAG_DOM_BLOB);
    blob->Impl()->SetLazyData(VoidString(), aData.type, aData.size, INT64_MAX);
    MOZ_ASSERT(!blob->IsFile());

      // XXX The comment below is somewhat confusing, since it seems to imply
      // that this branch is only executed when called from ActorsParent, but
      // it's executed from both the parent and the child side code.
    // XXX The comment below is somewhat confusing, since it seems to imply
    // that this branch is only executed when called from ActorsParent, but
    // it's executed from both the parent and the child side code.

      // ActorsParent sends here a kind of half blob and half file wrapped into
      // a DOM File object. DOM File and DOM Blob are a WebIDL wrapper around a
      // BlobImpl object. SetLazyData() has just changed the BlobImpl to be a
      // Blob (see the previous assert), but 'blob' still has the WebIDL DOM
      // File wrapping.
      // Before exposing it to content, we must recreate a DOM Blob object.
    // ActorsParent sends here a kind of half blob and half file wrapped into
    // a DOM File object. DOM File and DOM Blob are a WebIDL wrapper around a
    // BlobImpl object. SetLazyData() has just changed the BlobImpl to be a
    // Blob (see the previous assert), but 'blob' still has the WebIDL DOM
    // File wrapping.
    // Before exposing it to content, we must recreate a DOM Blob object.

      const RefPtr<Blob> exposedBlob =
          Blob::Create(blob->GetParentObject(), blob->Impl());
      if (NS_WARN_IF(!exposedBlob)) {
        return false;
      }

      return WrapAsJSObject(aCx, exposedBlob, aResult);
    const RefPtr<Blob> exposedBlob =
        Blob::Create(blob->GetParentObject(), blob->Impl());
    if (NS_WARN_IF(!exposedBlob)) {
      return false;
    }

    blob->Impl()->SetLazyData(aData.name, aData.type, aData.size,
                              aData.lastModifiedDate * PR_USEC_PER_MSEC);

    MOZ_ASSERT(blob->IsFile());
    const RefPtr<File> file = blob->ToFile();
    MOZ_ASSERT(file);

    return WrapAsJSObject(aCx, file, aResult);
    return WrapAsJSObject(aCx, exposedBlob, aResult);
  }
};

@@ -360,7 +384,8 @@ JSObject* CommonStructuredCloneReadCallback(
                    SCTAG_DOM_MUTABLEFILE == 0xffff8004 &&
                    SCTAG_DOM_FILE == 0xffff8005 &&
                    SCTAG_DOM_WASM_MODULE == 0xffff8006 &&
                    SCTAG_DOM_URLSEARCHPARAMS == 0xffff8014,
                    SCTAG_DOM_URLSEARCHPARAMS == 0xffff8014 &&
                    SCTAG_DOM_FILELIST == 0xffff8003,
                "You changed our structured clone tag values and just ate "
                "everyone's IndexedDB data. I hope you are happy.");

@@ -406,6 +431,81 @@ JSObject* CommonStructuredCloneReadCallback(
  using StructuredCloneFile =
      typename StructuredCloneReadInfo::StructuredCloneFile;

  if (aTag == SCTAG_DOM_FILELIST) {
    const auto& files = aCloneReadInfo->Files();

    uint32_t fileListLength = aData;

    if (fileListLength > files.Length()) {
      MOZ_ASSERT(false, "Bad file list length value!");

      return nullptr;
    }

    // We need to ensure that all RAII smart pointers which may trigger GC are
    // destroyed on return prior to this JS::Rooted being destroyed and
    // unrooting the pointer. This scope helps make this intent more explicit.
    JS::Rooted<JSObject*> obj(aCx);
    {
      nsCOMPtr<nsIGlobalObject> global = xpc::CurrentNativeGlobal(aCx);
      if (!global) {
        MOZ_ASSERT(false, "Could not access global!");

        return nullptr;
      }

      RefPtr<FileList> fileList = new FileList(global);

      for (uint32_t i = 0u; i < fileListLength; ++i) {
        uint32_t tag = UINT32_MAX;
        uint32_t index = UINT32_MAX;
        if (!JS_ReadUint32Pair(aReader, &tag, &index)) {
          return nullptr;
        }

        if (tag != SCTAG_DOM_FILE) {
          MOZ_ASSERT(false, "Unexpected tag!");

          return nullptr;
        }

        if (uint64_t(index) >= files.Length()) {
          MOZ_ASSERT(false, "Bad index!");

          return nullptr;
        }

        BlobOrFileData data;
        if (NS_WARN_IF(!ReadBlobOrFile(aReader, tag, &data))) {
          return nullptr;
        }

        auto& fileObj = aCloneReadInfo->MutableFile(index);

        RefPtr<File> file = ValueDeserializationHelper<
            StructuredCloneFile>::CreateUnwrappedFile(aCx, aDatabase, fileObj,
                                                      data);
        if (!file) {
          MOZ_ASSERT(false, "Could not deserialize file!");

          return nullptr;
        }

        if (!fileList->Append(file)) {
          MOZ_ASSERT(false, "Could not extend filelist!");

          return nullptr;
        }
      }

      if (!WrapAsJSObject(aCx, fileList, &obj)) {
        return nullptr;
      }
    }

    return obj;
  }

  if (aTag == SCTAG_DOM_FILE_WITHOUT_LASTMODIFIEDDATE ||
      aTag == SCTAG_DOM_BLOB || aTag == SCTAG_DOM_FILE ||
      aTag == SCTAG_DOM_MUTABLEFILE || aTag == SCTAG_DOM_WASM_MODULE) {
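As a script-level illustration of the behaviour the read paths above enable (the already-open db and the "store" object store name are assumptions for the example, not part of the patch): the same File stored more than once deserializes into File objects with identical contents, while the interning of the underlying stored data is what test_file_filelist.html below checks via file ids.

// Hedged sketch (assumes an already-open db with an object store named
// "store"); not patch code. The same File added to a FileList twice still
// round-trips, and both items read back with identical contents.
async function roundTripRepeatedFile(db, file) {
  const dt = new DataTransfer();
  dt.items.add(file);
  dt.items.add(file);  // the same File object twice

  const writeTx = db.transaction("store", "readwrite");
  writeTx.objectStore("store").put(dt.files, "pair");
  await new Promise(resolve => (writeTx.oncomplete = resolve));

  const getReq = db.transaction("store").objectStore("store").get("pair");
  const clone = await new Promise(resolve => {
    getReq.onsuccess = () => resolve(getReq.result);
  });

  console.assert(clone.length === 2, "both entries survive");
  console.assert(
    (await clone.item(0).text()) === (await clone.item(1).text()),
    "contents match"
  );
  return clone;
}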

@@ -239,6 +239,8 @@ skip-if = ["os == 'android'"]

["test_file_transaction_abort.html"]

["test_file_filelist.html"]

["test_getAll.html"]

["test_getFileId.html"]

dom/indexedDB/test/test_file_filelist.html (new file, 146 lines)
@@ -0,0 +1,146 @@
<!--
  Any copyright is dedicated to the Public Domain.
  http://creativecommons.org/publicdomain/zero/1.0/
-->
<html>
<head>
  <title>Indexed Database Filelist Serialization Blob Sharing Test</title>

  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>

  <script src="/tests/SimpleTest/SimpleTest.js"></script>

  <script type="text/javascript">
    // Arbitrary values chosen to ensure
    // we have at least a first/middle/last file and a moderate payload size
    // that we would never choose to store inline
    // in the structured serialization payload
    // if we started doing that for small blobs/files.
    const fileListSize = 3;
    const elementSize = 100000;

    async function testSteps() {
      const makeFileList = aFile => {
        const dataTransfer = new DataTransfer();
        for (let i = 0; i < fileListSize; ++i) {
          // Currently it's legal to add the same File
          // to a FileList multiple times
          // but this may change in the future;
          // there was some brief discussion about this at TPAC 2024.
          dataTransfer.items.add(aFile);
        }

        return dataTransfer.files;
      };

      const dbName = window.location.pathname;

      // This is a test of IndexedDB's Blob/File-interning logic
      // and we expect randomFile to be persisted to disk
      // by our IndexedDB impl exactly once and so all Files
      // retrieved from the database should have the same underlying file id.
      const randomFile = getRandomFile("random.bin", elementSize);
      const fileId = 1;

      const objectStoreInfo = [
        {
          name: "FileLists",
          options: {},
          data: { key: "A", fileList: makeFileList(randomFile) },
        },
        {
          name: "Other FileLists",
          options: {},
          data: { key: "B", fileList: makeFileList(randomFile) },
        },
      ];

      let request = indexedDB.open(dbName, /*version*/ 1);
      let event = await expectingUpgrade(request);
      let db = event.target.result;
      db.onerror = errorHandler;

      // Add filelists in version change transaction
      for (let info of objectStoreInfo) {
        let objectStore = db.createObjectStore(info.name, info.options);
        objectStore.add(info.data.fileList, info.data.key);
      }

      event = await expectingSuccess(request);
      db = event.target.result;
      db.onerror = errorHandler;

      let refResult;
      let refList;
      for (let info of objectStoreInfo) {
        let objectStore = db.transaction([info.name]).objectStore(info.name);

        event = await expectingSuccess(objectStore.get(info.data.key));
        let result = event.target.result;

        if (!refList) {
          refList = result;
        }

        const expectedLength = info.data.fileList.length;
        is(result.length, expectedLength, "Do filelist lengths match?");
        for (let i = 0; i < result.length; ++i) {
          await verifyBlobAsync(result.item(i), randomFile, fileId);

          if (!refResult) {
            refResult = result.item(i);
            continue;
          }

          is(
            getFilePath(result.item(i)),
            getFilePath(refResult),
            "The same os file"
          );
        }
      }

      // Add filelist in a regular read-write transaction
      for (let i = 0; i < objectStoreInfo.length; i++) {
        let info = objectStoreInfo[i];

        let objectStore = db
          .transaction([info.name], "readwrite")
          .objectStore(info.name);

        request = objectStore.add(refList, "C");
        event = await expectingSuccess(request);

        is(event.target.result, "C", "Got correct key");

        request = objectStore.get("C");
        event = await expectingSuccess(request);

        let result = event.target.result;
        const expectedLength = info.data.fileList.length;
        is(result.length, expectedLength, "Do filelist lengths match?");
        for (let i = 0; i < result.length; ++i) {
          await verifyBlobAsync(result.item(i), randomFile, fileId);

          is(
            getFilePath(result.item(i)),
            getFilePath(refResult),
            "The same os file"
          );
        }
      }

      // Two object store infos * two file lists * three items plus
      // original file called randomFile
      is(bufferCache.length, 13, "Correct length");
    }
  </script>
  <script type="text/javascript" src="file.js"></script>
  <script type="text/javascript" src="helpers.js"></script>

</head>

<body onload="runTest()">
</body>

</html>

@@ -15,9 +15,6 @@
  [Not serializable: symbol: Symbol(desc)]
    expected: [PASS, FAIL]

  [FileList: [object FileList\]]
    expected: FAIL

[structured-clone.any.worker.html?101-last]
  expected: [OK, ERROR]

@@ -0,0 +1,228 @@
<!--
  Any copyright is dedicated to the Public Domain.
  http://creativecommons.org/publicdomain/zero/1.0/
-->
<html>
<head>
  <meta charset="utf-8">
  <meta name="timeout" content="long">
  <script src="/resources/testharness.js"></script>
  <script src="/resources/testharnessreport.js"></script>
</head>
<body>
<script>

var fileCounter = 0;
var dbCounter = 0;

const fileListSize = 3;
const elementSize = 8196;

/**
 * Acknowledgement:
 * This test takes inspiration from IndexedDB/structured-clone.any.js
 * but the focus is on the variations of the filelist serialization.
 */
function addCloneTest(testName, orig, verifyFunc) {
  promise_test(async t => {
    const requestToFinish = req => {
      return new Promise((resolve, reject) => {
        req.onerror = () => {
          reject(req.error);
        };
        req.onblocked = () => {
          reject("Unexpected block");
        };
        req.onupgradeneeded = () => {
          reject("Unexpected upgrade");
        };
        req.onsuccess = ev => {
          resolve(ev.target.result);
        };
      });
    };

    const txEvents = [
      "abort",
      "complete",
      "error",
    ];

    const dbName = "db_" + dbCounter;
    ++dbCounter;

    const performRequest = async (query) => {
      const db = await new Promise((resolve, reject) => {
        const openReq = indexedDB.open(dbName, 1);
        openReq.onerror = () => {
          reject(openReq.error);
        };
        openReq.onupgradeneeded = ev => {
          const dbObj = ev.target.result;
          const store = dbObj.createObjectStore("store");
          // This index is not used, but evaluating key path on each put()
          // call will exercise (de)serialization.
          store.createIndex("index", "dummyKeyPath");
        };
        openReq.onsuccess = () => {
          resolve(openReq.result);
        };
      });

      t.add_cleanup(() => {
        if (db) {
          db.close();
          indexedDB.deleteDatabase(db.name);
        }
      });

      let result = undefined;
      try {
        const tx = db.transaction("store", "readwrite");
        const store = tx.objectStore("store");
        result = await requestToFinish(query(store));
        await new EventWatcher(t, tx, txEvents).wait_for("complete");
      } finally {
        db.close();
      }

      return result;
    };

    await performRequest(store => store.put(orig, "key"));
    const clone = await performRequest(store => store.get("key"));

    assert_not_equals(orig, clone);
    await verifyFunc(orig, clone);
  }, testName);
}

function makeFileList(dataGenerators) {
  const fileOpts = { type: "text/plain" };
  const dataTransfer = new DataTransfer();
  dataGenerators.forEach((generator, i) => {
    const file = new File(generator(i), "test_" + fileCounter, fileOpts);
    dataTransfer.items.add(file);
    ++fileCounter;
  });

  return dataTransfer.files;
}

const compareCloneToOrig = async (orig, clone) => {
  assert_equals(orig.length, clone.length);
  assert_equals(orig.length, fileListSize);
  for (let i = 0; i < orig.length; ++i) {
    const origFile = orig.item(i);
    const cloneFile = clone.item(i);
    assert_equals(origFile.name, cloneFile.name);
    assert_equals(await origFile.text(), await cloneFile.text());
  }
};

const compareObjects = async (orig, clone) => {
  assert_true("value" in orig);
  assert_true("value" in clone);

  return await compareCloneToOrig(orig.value, clone.value);
};

const compareArrays = async (orig, clone) => {
  assert_equals(orig.length, 1);
  assert_equals(clone.length, 1);

  return await compareCloneToOrig(orig[0], clone[0]);
};

const randomLetters = n => {
  const chars = "abcd";
  const someLetter = () => chars[Math.floor(Math.random() * chars.length)];
  return Array(n).fill().map(someLetter).join("");
};

// FileList - exposed in Workers, but not constructable.
if ("document" in self) {
  const addTestCases = (dataName, dataGenerator) => {
    const fileListStatic = makeFileList(
      Array(fileListSize).fill(dataGenerator)
    );

    addCloneTest(
      "Serialize filelist containing " + dataName,
      fileListStatic,
      compareCloneToOrig
    );

    addCloneTest(
      "Serialize object with filelist containing " + dataName,
      { value: fileListStatic },
      compareObjects
    );

    addCloneTest(
      "Serialize array with filelist containing " + dataName,
      [fileListStatic],
      compareArrays
    );

    const baseData = dataGenerator();

    // Currently it's legal for the same File to appear in a FileList
    // multiple times. This was the subject of some brief discussion
    // at TPAC 2024 and it's possible that as FileList moves entirely
    // into the HTML spec this may change.
    // In the meantime we want to make sure we support this case and
    // that IndexedDB's optimizations related to File-interning
    // don't break things, although that logic is tested more thoroughly
    // in test_file_filelist.html
    const fileListRepeated = makeFileList(
      Array(fileListSize).fill(() => {
        return baseData;
      })
    );

    addCloneTest(
      "Serialize filelist containing repeated " + dataName,
      fileListRepeated,
      compareCloneToOrig
    );

    addCloneTest(
      "Serialize object with filelist containing repeated " + dataName,
      { value: fileListRepeated },
      compareObjects
    );

    addCloneTest(
      "Serialize array with filelist containing repeated " + dataName,
      [fileListRepeated],
      compareArrays
    );
  };

  const genString = () => {
    return [randomLetters(elementSize)];
  };

  addTestCases("random string", genString);

  const genArray = () => {
    const array = new Uint32Array(elementSize);
    crypto.getRandomValues(array);
    return array;
  };

  addTestCases("random typed array", genArray);

  const genBlob = () => {
    const array = new Uint32Array(elementSize);
    crypto.getRandomValues(array);
    return [new Blob(array)];
  };

  addTestCases("random blob", genBlob);
}

</script>
</body>
</html>