Backed out 14 changesets (bug 1652884) for VideoFrameConverter related failures CLOSED TREE
Backed out changeset 28c4e8c373f0 (bug 1652884)
Backed out changeset 658ba8f39abe (bug 1652884)
Backed out changeset 8e67fe040e4a (bug 1652884)
Backed out changeset 6f5833203763 (bug 1652884)
Backed out changeset 569ff85dfc2e (bug 1652884)
Backed out changeset eaa171643447 (bug 1652884)
Backed out changeset 6b37b60b6662 (bug 1652884)
Backed out changeset 438cce7456fb (bug 1652884)
Backed out changeset e6ed13952b67 (bug 1652884)
Backed out changeset e0b1266231bf (bug 1652884)
Backed out changeset 32f4aae2b5fe (bug 1652884)
Backed out changeset 76b4abccd61b (bug 1652884)
Backed out changeset 9010365ffa66 (bug 1652884)
Backed out changeset 763f39eb5c13 (bug 1652884)

@@ -122,20 +122,6 @@ class WebRTCChild extends JSWindowActorChild {
           aMessage.data
         );
         break;
-      case "webrtc:MuteCamera":
-        Services.obs.notifyObservers(
-          null,
-          "getUserMedia:muteVideo",
-          aMessage.data
-        );
-        break;
-      case "webrtc:UnmuteCamera":
-        Services.obs.notifyObservers(
-          null,
-          "getUserMedia:unmuteVideo",
-          aMessage.data
-        );
-        break;
     }
   }
 }

@@ -1,21 +1,6 @@
 /* Any copyright is dedicated to the Public Domain.
  * http://creativecommons.org/publicdomain/zero/1.0/ */

-async function setCameraMuted(mute) {
-  const windowId = gBrowser.selectedBrowser.innerWindowID;
-  return SpecialPowers.spawn(
-    gBrowser.selectedBrowser,
-    [{ mute, windowId }],
-    function(args) {
-      Services.obs.notifyObservers(
-        content.window,
-        args.mute ? "getUserMedia:muteVideo" : "getUserMedia:unmuteVideo",
-        JSON.stringify(args.windowId)
-      );
-    }
-  );
-}
-
 function setTrackEnabled(audio, video) {
   return SpecialPowers.spawn(
     gBrowser.selectedBrowser,

@@ -32,22 +17,6 @@ function setTrackEnabled(audio, video) {
   );
 }

-async function getVideoTrackMuted() {
-  return SpecialPowers.spawn(
-    gBrowser.selectedBrowser,
-    [],
-    () => content.wrappedJSObject.gStreams[0].getVideoTracks()[0].muted
-  );
-}
-
-async function getVideoTrackEvents() {
-  return SpecialPowers.spawn(
-    gBrowser.selectedBrowser,
-    [],
-    () => content.wrappedJSObject.gVideoEvents
-  );
-}
-
 function cloneTracks(audio, video) {
   return SpecialPowers.spawn(
     gBrowser.selectedBrowser,

@@ -94,7 +63,7 @@ var gTests = [
   {
     desc:
       "getUserMedia audio+video: disabling the stream shows the paused indicator",
-    run: async function checkDisabled() {
+    run: async function checkPaused() {
       let observerPromise = expectObserverCalled("getUserMedia:request");
       let promise = promisePopupNotificationShown("webRTC-shareDevices");
       await promiseRequestDevice(true, true);

@@ -127,7 +96,8 @@ var gTests = [
       observerPromise = expectObserverCalled("recording-device-events", 2);
       await setTrackEnabled(false, false);

-      // Wait for capture state to propagate to the UI asynchronously.
+      // It sometimes takes a bit longer before the change propagates to the UI,
+      // wait for it to avoid intermittents.
       await BrowserTestUtils.waitForCondition(
         () =>
           window.gIdentityHandler._sharingState.webRTC.camera ==

@@ -187,7 +157,7 @@ var gTests = [
   {
     desc:
       "getUserMedia audio+video: disabling the original tracks and stopping enabled clones shows the paused indicator",
-    run: async function checkDisabledAfterCloneStop() {
+    run: async function checkPausedAfterCloneStop() {
       let observerPromise = expectObserverCalled("getUserMedia:request");
       let promise = promisePopupNotificationShown("webRTC-shareDevices");
       await promiseRequestDevice(true, true);

@@ -227,7 +197,8 @@ var gTests = [
       // Stop the clones. This should disable the sharing indicators.
       await stopClonedTracks(true, true);

-      // Wait for capture state to propagate to the UI asynchronously.
+      // It sometimes takes a bit longer before the change propagates to the UI,
+      // wait for it to avoid intermittents.
       await BrowserTestUtils.waitForCondition(
         () =>
           window.gIdentityHandler._sharingState.webRTC.camera ==

@@ -289,7 +260,7 @@ var gTests = [
   {
     desc:
       "getUserMedia screen: disabling the stream shows the paused indicator",
-    run: async function checkScreenDisabled() {
+    run: async function checkScreenPaused() {
       let observerPromise = expectObserverCalled("getUserMedia:request");
       let promise = promisePopupNotificationShown("webRTC-shareDevices");
       await promiseRequestDevice(false, true, null, "screen");

@@ -331,7 +302,8 @@ var gTests = [
       observerPromise = expectObserverCalled("recording-device-events");
       await setTrackEnabled(null, false);

-      // Wait for capture state to propagate to the UI asynchronously.
+      // It sometimes takes a bit longer before the change propagates to the UI,
+      // wait for it to avoid intermittents.
       await BrowserTestUtils.waitForCondition(
         () =>
           window.gIdentityHandler._sharingState.webRTC.screen == "ScreenPaused",

@@ -354,311 +326,6 @@ var gTests = [
       await closeStream();
     },
   },
-
-  {
-    desc:
-      "getUserMedia audio+video: muting the camera shows the muted indicator",
-    run: async function checkMuted() {
-      let observerPromise = expectObserverCalled("getUserMedia:request");
-      let promise = promisePopupNotificationShown("webRTC-shareDevices");
-      await promiseRequestDevice(true, true);
-      await promise;
-      await observerPromise;
-      checkDeviceSelectors(true, true);
-
-      let indicator = promiseIndicatorWindow();
-      let observerPromise1 = expectObserverCalled(
-        "getUserMedia:response:allow"
-      );
-      let observerPromise2 = expectObserverCalled("recording-device-events");
-      await promiseMessage("ok", () => {
-        PopupNotifications.panel.firstElementChild.button.click();
-      });
-      await observerPromise1;
-      await observerPromise2;
-      Assert.deepEqual(
-        await getMediaCaptureState(),
-        { audio: true, video: true },
-        "expected camera and microphone to be shared"
-      );
-      await indicator;
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track starts unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        [],
-        "no video track events fired yet"
-      );
-
-      // Mute camera.
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setCameraMuted(true);
-
-      // Wait for capture state to propagate to the UI asynchronously.
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_DISABLED,
-        "video should be muted"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show only camera as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is muted");
-      Assert.deepEqual(await getVideoTrackEvents(), ["mute"], "mute fired");
-
-      // Unmute video again.
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setCameraMuted(false);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_ENABLED,
-        "video should be enabled"
-      );
-
-      await observerPromise;
-
-      // Both streams should show as running.
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track is unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute"],
-        "unmute fired"
-      );
-      await closeStream();
-    },
-  },
-
-  {
-    desc: "getUserMedia audio+video: disabling & muting camera in combination",
-    // Test the following combinations of disabling and muting camera:
-    // 1. Disable video track only.
-    // 2. Mute camera & disable audio (to have a condition to wait for)
-    // 3. Enable both audio and video tracks (only audio should flow).
-    // 4. Unmute camera again (video should flow).
-    // 5. Mute camera & disable both tracks.
-    // 6. Unmute camera & enable audio (only audio should flow)
-    // 7. Enable video track again (video should flow).
-    run: async function checkDisabledMutedCombination() {
-      let observerPromise = expectObserverCalled("getUserMedia:request");
-      let promise = promisePopupNotificationShown("webRTC-shareDevices");
-      await promiseRequestDevice(true, true);
-      await promise;
-      await observerPromise;
-      checkDeviceSelectors(true, true);
-
-      let indicator = promiseIndicatorWindow();
-      let observerPromise1 = expectObserverCalled(
-        "getUserMedia:response:allow"
-      );
-      let observerPromise2 = expectObserverCalled("recording-device-events");
-      await promiseMessage("ok", () => {
-        PopupNotifications.panel.firstElementChild.button.click();
-      });
-      await observerPromise1;
-      await observerPromise2;
-      Assert.deepEqual(
-        await getMediaCaptureState(),
-        { audio: true, video: true },
-        "expected camera and microphone to be shared"
-      );
-      await indicator;
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-
-      // 1. Disable video track only.
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setTrackEnabled(null, false);
-
-      // Wait for capture state to propagate to the UI asynchronously.
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_DISABLED,
-        "video should be disabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show only video as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track still unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        [],
-        "no video track events fired yet"
-      );
-
-      // 2. Mute camera & disable audio (to have a condition to wait for)
-      observerPromise = expectObserverCalled("recording-device-events", 2);
-      await setCameraMuted(true);
-      await setTrackEnabled(false, null);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.microphone ==
-          STATE_CAPTURE_DISABLED,
-        "audio should be disabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show both as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_DISABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is muted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute"],
-        "mute is still fired even though track was disabled"
-      );
-
-      // 3. Enable both audio and video tracks (only audio should flow).
-      observerPromise = expectObserverCalled("recording-device-events", 2);
-      await setTrackEnabled(true, true);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.microphone ==
-          STATE_CAPTURE_ENABLED,
-        "audio should be enabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show only audio as enabled, as video is muted.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is still muted");
-      Assert.deepEqual(await getVideoTrackEvents(), ["mute"], "no new events");
-
-      // 4. Unmute camera again (video should flow).
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setCameraMuted(false);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_ENABLED,
-        "video should be enabled"
-      );
-
-      await observerPromise;
-
-      // Both streams should show as running.
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track is unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute"],
-        "unmute fired"
-      );
-
-      // 5. Mute camera & disable both tracks.
-      observerPromise = expectObserverCalled("recording-device-events", 3);
-      await setCameraMuted(true);
-      await setTrackEnabled(false, false);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_DISABLED,
-        "video should be disabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show both as disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_DISABLED,
-      });
-      is(await getVideoTrackMuted(), true, "video track is muted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute", "mute"],
-        "mute fired again"
-      );
-
-      // 6. Unmute camera & enable audio (only audio should flow)
-      observerPromise = expectObserverCalled("recording-device-events", 2);
-      await setCameraMuted(false);
-      await setTrackEnabled(true, null);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.microphone ==
-          STATE_CAPTURE_ENABLED,
-        "audio should be enabled"
-      );
-
-      await observerPromise;
-
-      // Only audio should show as running, as video track is still disabled.
-      await checkSharingUI({
-        video: STATE_CAPTURE_DISABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track is unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute", "mute", "unmute"],
-        "unmute fired even though track is disabled"
-      );
-
-      // 7. Enable video track again (video should flow).
-      observerPromise = expectObserverCalled("recording-device-events");
-      await setTrackEnabled(null, true);
-
-      await BrowserTestUtils.waitForCondition(
-        () =>
-          window.gIdentityHandler._sharingState.webRTC.camera ==
-          STATE_CAPTURE_ENABLED,
-        "video should be enabled"
-      );
-
-      await observerPromise;
-
-      // The identity UI should show both as running again.
-      await checkSharingUI({
-        video: STATE_CAPTURE_ENABLED,
-        audio: STATE_CAPTURE_ENABLED,
-      });
-      is(await getVideoTrackMuted(), false, "video track remains unmuted");
-      Assert.deepEqual(
-        await getVideoTrackEvents(),
-        ["mute", "unmute", "mute", "unmute"],
-        "no new events fired"
-      );
-      await closeStream();
-    },
-  },
 ];

 add_task(async function test() {

@@ -24,12 +24,14 @@ function message(m) {
 }

 var gStreams = [];
-var gVideoEvents = [];

-async function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
-  const opts = {video: aVideo, audio: aAudio};
+function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
+  var opts = {video: aVideo, audio: aAudio};
   if (aShare) {
-    opts.video = { mediaSource: aShare };
+    opts.video = {
+      mozMediaSource: aShare,
+      mediaSource: aShare,
+    };
   }
   if (useFakeStreams) {
     opts.fake = true;

@@ -49,19 +51,11 @@ async function requestDevice(aAudio, aVideo, aShare, aBadDevice = false) {
     opts.fake = true;
   }

-  try {
-    const stream = await navigator.mediaDevices.getUserMedia(opts);
-    gStreams.push(stream);
-    const track = stream.getVideoTracks()[0];
-    if (track) {
-      for (const name of ["mute", "unmute", "ended"]) {
-        track.addEventListener(name, () => gVideoEvents.push(name));
-      }
-    }
-    message("ok");
-  } catch (err) {
-    message("error: " + err);
-  }
+  navigator.mediaDevices.getUserMedia(opts)
+    .then(stream => {
+      gStreams.push(stream);
+      message("ok");
+    }, err => message("error: " + err));
 }
 message("pending");

@@ -73,7 +67,6 @@ function closeStream() {
     }
   }
   gStreams = [];
-  gVideoEvents = [];
   message("closed");
 }
 </script>

@@ -1130,8 +1130,8 @@ class HTMLMediaElement::MediaElementTrackSource
     if (!mTrack) {
       return;
     }
-    mTrack->SetDisabledTrackMode(aEnabled ? DisabledTrackMode::ENABLED
-                                          : DisabledTrackMode::SILENCE_FREEZE);
+    mTrack->SetEnabled(aEnabled ? DisabledTrackMode::ENABLED
+                                : DisabledTrackMode::SILENCE_FREEZE);
   }

   void SetPrincipal(RefPtr<nsIPrincipal> aPrincipal) {

@@ -66,11 +66,6 @@ void ForwardedInputTrack::RemoveInput(MediaInputPort* aPort) {
                          this, listener.get(), aPort->GetSource()));
     source->RemoveDirectListenerImpl(listener);
   }
-
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  mInputDisabledMode = DisabledTrackMode::ENABLED;
-  NotifyIfDisabledModeChangedFrom(oldMode);
-
   mInputPort = nullptr;
   ProcessedMediaTrack::RemoveInput(aPort);
 }

@@ -80,8 +75,6 @@ void ForwardedInputTrack::SetInput(MediaInputPort* aPort) {
   MOZ_ASSERT(aPort->GetSource());
   MOZ_ASSERT(aPort->GetSource()->GetData());
   MOZ_ASSERT(!mInputPort);
-  MOZ_ASSERT(mInputDisabledMode == DisabledTrackMode::ENABLED);
-
   mInputPort = aPort;

   for (const auto& listener : mOwnedDirectListeners) {

@@ -91,10 +84,6 @@ void ForwardedInputTrack::SetInput(MediaInputPort* aPort) {
                          this, listener.get(), aPort->GetSource()));
     source->AddDirectListenerImpl(do_AddRef(listener));
   }
-
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  mInputDisabledMode = mInputPort->GetSource()->CombinedDisabledMode();
-  NotifyIfDisabledModeChangedFrom(oldMode);
 }

 void ForwardedInputTrack::ProcessInputImpl(MediaTrack* aSource,

@@ -181,19 +170,7 @@ void ForwardedInputTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
   }
 }

-DisabledTrackMode ForwardedInputTrack::CombinedDisabledMode() const {
-  if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK ||
-      mInputDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
-    return DisabledTrackMode::SILENCE_BLACK;
-  }
-  if (mDisabledMode == DisabledTrackMode::SILENCE_FREEZE ||
-      mInputDisabledMode == DisabledTrackMode::SILENCE_FREEZE) {
-    return DisabledTrackMode::SILENCE_FREEZE;
-  }
-  return DisabledTrackMode::ENABLED;
-}
-
-void ForwardedInputTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
+void ForwardedInputTrack::SetEnabledImpl(DisabledTrackMode aMode) {
   bool enabled = aMode == DisabledTrackMode::ENABLED;
   TRACK_LOG(LogLevel::Info, ("ForwardedInputTrack %p was explicitly %s", this,
                              enabled ? "enabled" : "disabled"));

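The removed CombinedDisabledMode() encodes a precedence between the disable modes: SILENCE_BLACK (black frames) dominates SILENCE_FREEZE (the last frame is held), which in turn dominates ENABLED. A minimal standalone sketch of that rule, assuming a free function in place of the member method (the enum values mirror the ones in the diff):

    enum class DisabledTrackMode { ENABLED, SILENCE_FREEZE, SILENCE_BLACK };

    // Hypothetical helper: combine a track's own mode with its input's mode.
    // SILENCE_BLACK wins over SILENCE_FREEZE, which wins over ENABLED, as in
    // the removed ForwardedInputTrack::CombinedDisabledMode().
    static DisabledTrackMode CombineDisabledModes(DisabledTrackMode aOwn,
                                                  DisabledTrackMode aInput) {
      if (aOwn == DisabledTrackMode::SILENCE_BLACK ||
          aInput == DisabledTrackMode::SILENCE_BLACK) {
        return DisabledTrackMode::SILENCE_BLACK;
      }
      if (aOwn == DisabledTrackMode::SILENCE_FREEZE ||
          aInput == DisabledTrackMode::SILENCE_FREEZE) {
        return DisabledTrackMode::SILENCE_FREEZE;
      }
      return DisabledTrackMode::ENABLED;
    }
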
@@ -212,22 +189,7 @@ void ForwardedInputTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
       listener->IncreaseDisabled(aMode);
     }
   }
-  MediaTrack::SetDisabledTrackModeImpl(aMode);
-}
-
-void ForwardedInputTrack::OnInputDisabledModeChanged(
-    DisabledTrackMode aInputMode) {
-  MOZ_ASSERT(mInputs.Length() == 1);
-  MOZ_ASSERT(mInputs[0]->GetSource());
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  if (mInputDisabledMode == DisabledTrackMode::SILENCE_BLACK &&
-      aInputMode == DisabledTrackMode::SILENCE_FREEZE) {
-    // Don't allow demoting from SILENCE_BLACK to SILENCE_FREEZE. Frames will
-    // remain black so we shouldn't notify that the track got enabled.
-    aInputMode = DisabledTrackMode::SILENCE_BLACK;
-  }
-  mInputDisabledMode = aInputMode;
-  NotifyIfDisabledModeChangedFrom(oldMode);
+  MediaTrack::SetEnabledImpl(aMode);
 }

 void ForwardedInputTrack::AddDirectListenerImpl(

@@ -26,9 +26,7 @@ class ForwardedInputTrack : public ProcessedMediaTrack {
   void RemoveInput(MediaInputPort* aPort) override;
   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;

-  DisabledTrackMode CombinedDisabledMode() const override;
-  void SetDisabledTrackModeImpl(DisabledTrackMode aMode) override;
-  void OnInputDisabledModeChanged(DisabledTrackMode aInputMode) override;
+  void SetEnabledImpl(DisabledTrackMode aMode) override;

   friend class MediaTrackGraphImpl;

@@ -54,11 +52,6 @@ class ForwardedInputTrack : public ProcessedMediaTrack {
   // Set if an input has been added, nullptr otherwise. Adding more than one
   // input is an error.
   MediaInputPort* mInputPort = nullptr;
-
-  // This track's input's associated disabled mode. ENABLED if there is no
-  // input. This is used with MediaTrackListener::NotifyEnabledStateChanged(),
-  // which affects only video tracks. This is set only on ForwardedInputTracks.
-  DisabledTrackMode mInputDisabledMode = DisabledTrackMode::ENABLED;
 };

 }  // namespace mozilla

@@ -210,16 +210,10 @@ struct DeviceState {
   // MainThread only.
   bool mStopped = false;

-  // true if mDevice is currently enabled.
-  // A device must be both enabled and unmuted to be turned on and capturing.
+  // true if mDevice is currently enabled, i.e., turned on and capturing.
   // MainThread only.
   bool mDeviceEnabled = false;

-  // true if mDevice is currently muted.
-  // A device that is either muted or disabled is turned off and not capturing.
-  // MainThread only.
-  bool mDeviceMuted;
-
   // true if the application has currently enabled mDevice.
   // MainThread only.
   bool mTrackEnabled = false;

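The comments removed from DeviceState above pin down the state model the backout undoes: a device captures only while it is both enabled and unmuted, and either flag alone turns it off. A minimal sketch of that predicate, using a stripped-down stand-in for the real struct:

    // Stand-in for the two DeviceState flags described in the removed comments.
    struct DeviceStateSketch {
      bool mDeviceEnabled = false;  // the application has the device turned on
      bool mDeviceMuted = false;    // the user agent has muted the device
    };

    // A device is turned on and capturing only when enabled and unmuted;
    // muting or disabling it turns the underlying source off.
    static bool IsCapturing(const DeviceStateSketch& aState) {
      return aState.mDeviceEnabled && !aState.mDeviceMuted;
    }
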
@@ -234,7 +228,7 @@
   bool mOperationInProgress = false;

   // true if we are allowed to turn off the underlying source while all tracks
-  // are disabled. Only affects disabling; always turns off on user-agent mute.
+  // are disabled.
   // MainThread only.
   bool mOffWhileDisabled = false;

@@ -319,8 +313,7 @@ class SourceListener : public SupportsWeakPtr {
   void Activate(RefPtr<MediaDevice> aAudioDevice,
                 RefPtr<LocalTrackSource> aAudioTrackSource,
                 RefPtr<MediaDevice> aVideoDevice,
-                RefPtr<LocalTrackSource> aVideoTrackSource,
-                bool aStartVideoMuted, bool aStartAudioMuted);
+                RefPtr<LocalTrackSource> aVideoTrackSource);

   /**
    * Posts a task to initialize and start all associated devices.

@@ -385,31 +378,12 @@
    */
   void SetEnabledFor(MediaTrack* aTrack, bool aEnabled);

-  /**
-   * Posts a task to set the muted state of the device associated with
-   * aTrackSource to aMuted and notifies the associated window listener that a
-   * track's state has changed.
-   *
-   * Turning the hardware off while the device is muted is supported for:
-   * - Camera (enabled by default, controlled by pref
-   *   "media.getusermedia.camera.off_while_disabled.enabled")
-   * - Microphone (disabled by default, controlled by pref
-   *   "media.getusermedia.microphone.off_while_disabled.enabled")
-   * Screen-, app-, or windowsharing is not supported at this time.
-   */
-  void SetMutedFor(LocalTrackSource* aTrackSource, bool aMuted);
-
   /**
    * Stops all screen/app/window/audioCapture sharing, but not camera or
    * microphone.
    */
   void StopSharing();

-  /**
-   * Mutes or unmutes the associated video device if it is a camera.
-   */
-  void MuteOrUnmuteCamera(bool aMute);
-
   MediaDevice* GetAudioDevice() const {
     return mAudioDeviceState ? mAudioDeviceState->mDevice.get() : nullptr;
   }

@@ -437,15 +411,6 @@
  private:
   virtual ~SourceListener() = default;

-  using DeviceOperationPromise =
-      MozPromise<nsresult, bool, /* IsExclusive = */ true>;
-
-  /**
-   * Posts a task to start or stop the device associated with aTrack, based on
-   * a passed-in boolean. Private method used by SetEnabledFor and SetMutedFor.
-   */
-  RefPtr<DeviceOperationPromise> UpdateDevice(MediaTrack* aTrack, bool aOn);
-
   /**
    * Returns a pointer to the device state for aTrack.
    *

@@ -528,8 +493,7 @@ class GetUserMediaWindowListener {

     mInactiveListeners.RemoveElement(aListener);
     aListener->Activate(std::move(aAudioDevice), std::move(aAudioTrackSource),
-                        std::move(aVideoDevice), std::move(aVideoTrackSource),
-                        mCamerasAreMuted, /* aStartAudioMuted */ false);
+                        std::move(aVideoDevice), std::move(aVideoTrackSource));
     mActiveListeners.AppendElement(std::move(aListener));
   }

@@ -674,8 +638,6 @@

   void StopRawID(const nsString& removedDeviceID);

-  void MuteOrUnmuteCameras(bool aMute);
-
   /**
    * Called by one of our SourceListeners when one of its tracks has changed so
    * that chrome state is affected.

@@ -749,12 +711,6 @@

   nsTArray<RefPtr<SourceListener>> mInactiveListeners;
   nsTArray<RefPtr<SourceListener>> mActiveListeners;
-
-  // Whether camera access in this window is currently User Agent (UA) muted.
-  // When true, new camera tracks must start out muted, to avoid JS
-  // circumventing UA mute by calling getUserMedia again.
-  // Per-camera UA muting is not supported.
-  bool mCamerasAreMuted = false;
 };

 class LocalTrackSource : public MediaStreamTrackSource {

@@ -815,16 +771,6 @@ class LocalTrackSource : public MediaStreamTrackSource {
     }
   }

-  void Mute() {
-    MutedChanged(true);
-    mTrack->SetDisabledTrackMode(DisabledTrackMode::SILENCE_BLACK);
-  }
-
-  void Unmute() {
-    MutedChanged(false);
-    mTrack->SetDisabledTrackMode(DisabledTrackMode::ENABLED);
-  }
-
   const MediaSourceEnum mSource;
   const RefPtr<MediaTrack> mTrack;
   const RefPtr<const PeerIdentity> mPeerIdentity;

@@ -2049,10 +1995,6 @@ MediaManager* MediaManager::Get() {
       obs->AddObserver(sSingleton, "getUserMedia:response:noOSPermission",
                        false);
       obs->AddObserver(sSingleton, "getUserMedia:revoke", false);
-      obs->AddObserver(sSingleton, "getUserMedia:muteVideo", false);
-      obs->AddObserver(sSingleton, "getUserMedia:unmuteVideo", false);
-      obs->AddObserver(sSingleton, "application-background", false);
-      obs->AddObserver(sSingleton, "application-foreground", false);
     }
     // else MediaManager won't work properly and will leak (see bug 837874)
     nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);

@@ -3450,17 +3392,6 @@ void MediaManager::OnNavigation(uint64_t aWindowID) {
   MOZ_ASSERT(!GetWindowListener(aWindowID));
 }

-void MediaManager::OnCameraMute(bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread());
-  LOG("OnCameraMute for all windows");
-  mCamerasMuted = aMute;
-  // This is safe since we're on main-thread, and the windowlist can only
-  // be added to from the main-thread
-  for (auto iter = mActiveWindows.Iter(); !iter.Done(); iter.Next()) {
-    iter.UserData()->MuteOrUnmuteCameras(aMute);
-  }
-}
-
 void MediaManager::AddWindowID(uint64_t aWindowId,
                                RefPtr<GetUserMediaWindowListener> aListener) {
   MOZ_ASSERT(NS_IsMainThread());

@@ -3473,7 +3404,6 @@ void MediaManager::AddWindowID(uint64_t aWindowId,
     return;
   }

-  aListener->MuteOrUnmuteCameras(mCamerasMuted);
   GetActiveWindows()->Put(aWindowId, std::move(aListener));
 }

@@ -3589,10 +3519,6 @@ void MediaManager::Shutdown() {
     obs->RemoveObserver(this, "getUserMedia:response:deny");
     obs->RemoveObserver(this, "getUserMedia:response:noOSPermission");
     obs->RemoveObserver(this, "getUserMedia:revoke");
-    obs->RemoveObserver(this, "getUserMedia:muteVideo");
-    obs->RemoveObserver(this, "getUserMedia:unmuteVideo");
-    obs->RemoveObserver(this, "application-background");
-    obs->RemoveObserver(this, "application-foreground");

     nsCOMPtr<nsIPrefBranch> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
     if (prefs) {

@@ -3744,23 +3670,6 @@ bool IsGUMResponseNoAccess(const char* aTopic,
   return false;
 }

-static MediaSourceEnum ParseScreenColonWindowID(const char16_t* aData,
-                                                uint64_t* aWindowIDOut) {
-  MOZ_ASSERT(aWindowIDOut);
-  // may be windowid or screen:windowid
-  const nsDependentString data(aData);
-  if (Substring(data, 0, strlen("screen:")).EqualsLiteral("screen:")) {
-    nsresult rv;
-    *aWindowIDOut = Substring(data, strlen("screen:")).ToInteger64(&rv);
-    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
-    return MediaSourceEnum::Screen;
-  }
-  nsresult rv;
-  *aWindowIDOut = data.ToInteger64(&rv);
-  MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
-  return MediaSourceEnum::Camera;
-}
-
 nsresult MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
                                const char16_t* aData) {
   MOZ_ASSERT(NS_IsMainThread());

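The ParseScreenColonWindowID() helper removed above accepts observer data in two shapes, a bare window id ("12345") or a screen-prefixed one ("screen:12345"), and reports which form it saw. A self-contained sketch of the same parse using standard C++ strings in place of the XPCOM classes (the names mirror the removed helper, but the types are stand-ins):

    #include <cstdint>
    #include <cstdlib>
    #include <string>
    #include <string_view>

    enum class MediaSource { Camera, Screen };

    // "12345" -> Camera, "screen:12345" -> Screen; the numeric suffix is the
    // window id in both cases. Unlike the removed helper, this sketch does not
    // assert on a malformed number.
    static MediaSource ParseScreenColonWindowID(std::string_view aData,
                                                uint64_t* aWindowIDOut) {
      constexpr std::string_view kPrefix = "screen:";
      MediaSource source = MediaSource::Camera;
      if (aData.substr(0, kPrefix.size()) == kPrefix) {
        aData.remove_prefix(kPrefix.size());
        source = MediaSource::Screen;
      }
      *aWindowIDOut = std::strtoull(std::string(aData).c_str(), nullptr, 10);
      return source;
    }
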
@@ -3865,32 +3774,28 @@ nsresult MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
     return NS_OK;

   } else if (!strcmp(aTopic, "getUserMedia:revoke")) {
-    uint64_t windowID;
-    if (ParseScreenColonWindowID(aData, &windowID) == MediaSourceEnum::Screen) {
-      LOG("Revoking ScreenCapture access for window %" PRIu64, windowID);
-      StopScreensharing(windowID);
+    nsresult rv;
+    // may be windowid or screen:windowid
+    const nsDependentString data(aData);
+    if (Substring(data, 0, strlen("screen:")).EqualsLiteral("screen:")) {
+      uint64_t windowID = Substring(data, strlen("screen:")).ToInteger64(&rv);
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+      if (NS_SUCCEEDED(rv)) {
+        LOG("Revoking Screen/windowCapture access for window %" PRIu64,
+            windowID);
+        StopScreensharing(windowID);
+      }
     } else {
-      LOG("Revoking MediaCapture access for window %" PRIu64, windowID);
-      OnNavigation(windowID);
+      uint64_t windowID = data.ToInteger64(&rv);
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+      if (NS_SUCCEEDED(rv)) {
+        LOG("Revoking MediaCapture access for window %" PRIu64, windowID);
+        OnNavigation(windowID);
+      }
     }
     return NS_OK;
-  } else if (!strcmp(aTopic, "getUserMedia:muteVideo") ||
-             !strcmp(aTopic, "getUserMedia:unmuteVideo")) {
-    OnCameraMute(!strcmp(aTopic, "getUserMedia:muteVideo"));
-    return NS_OK;
-  } else if ((!strcmp(aTopic, "application-background") ||
-              !strcmp(aTopic, "application-foreground")) &&
-             StaticPrefs::media_getusermedia_camera_background_mute_enabled()) {
-    // TODO: These don't fire in the content process yet (see bug 1660049).
-    //
-    // On mobile we turn off any cameras (but not mics) while in the background.
-    // Keeping things simple for now by duplicating test-covered code above.
-    //
-    // NOTE: If a mobile device ever wants to implement "getUserMedia:muteVideo"
-    // as well, it'd need to update this code to handle & test the combinations.
-    OnCameraMute(!strcmp(aTopic, "application-background"));
-    return NS_OK;
   }

   return NS_OK;
 }

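In the branches removed from Observe() above, each pair of topics collapses into a single boolean passed to OnCameraMute(): the topic string itself carries the payload. A sketch of that dispatch shape in isolation (OnCameraMute here is a stub; the real method iterates the active windows, as shown in the OnNavigation hunk above):

    #include <cstdio>
    #include <cstring>

    // Stub standing in for MediaManager::OnCameraMute().
    static void OnCameraMute(bool aMute) {
      std::printf("cameras %s\n", aMute ? "muted" : "unmuted");
    }

    // strcmp() returns 0 on a match, hence the negations: the handler runs
    // for either topic, and the boolean records which one of the pair it was.
    static void Observe(const char* aTopic) {
      if (!std::strcmp(aTopic, "getUserMedia:muteVideo") ||
          !std::strcmp(aTopic, "getUserMedia:unmuteVideo")) {
        OnCameraMute(!std::strcmp(aTopic, "getUserMedia:muteVideo"));
      }
    }
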
@@ -4138,8 +4043,7 @@ void SourceListener::Register(GetUserMediaWindowListener* aListener) {
 void SourceListener::Activate(RefPtr<MediaDevice> aAudioDevice,
                               RefPtr<LocalTrackSource> aAudioTrackSource,
                               RefPtr<MediaDevice> aVideoDevice,
-                              RefPtr<LocalTrackSource> aVideoTrackSource,
-                              bool aStartVideoMuted, bool aStartAudioMuted) {
+                              RefPtr<LocalTrackSource> aVideoTrackSource) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");

   LOG("SourceListener %p activating audio=%p video=%p", this,

@@ -4157,10 +4061,6 @@ void SourceListener::Activate(RefPtr<MediaDevice> aAudioDevice,
     mAudioDeviceState =
         MakeUnique<DeviceState>(std::move(aAudioDevice),
                                 std::move(aAudioTrackSource), offWhileDisabled);
-    mAudioDeviceState->mDeviceMuted = aStartAudioMuted;
-    if (aStartAudioMuted) {
-      mAudioDeviceState->mTrackSource->Mute();
-    }
   }

   if (aVideoDevice) {

@@ -4171,10 +4071,6 @@ void SourceListener::Activate(RefPtr<MediaDevice> aAudioDevice,
     mVideoDeviceState =
         MakeUnique<DeviceState>(std::move(aVideoDevice),
                                 std::move(aVideoTrackSource), offWhileDisabled);
-    mVideoDeviceState->mDeviceMuted = aStartVideoMuted;
-    if (aStartVideoMuted) {
-      mVideoDeviceState->mTrackSource->Mute();
-    }
   }
 }

@@ -4191,15 +4087,11 @@ SourceListener::InitializeAsync() {
        audioStream = mAudioDeviceState
                          ? mAudioDeviceState->mTrackSource->mTrack
                          : nullptr,
-       audioDeviceMuted =
-           mAudioDeviceState ? mAudioDeviceState->mDeviceMuted : false,
        videoDevice =
            mVideoDeviceState ? mVideoDeviceState->mDevice : nullptr,
        videoStream = mVideoDeviceState
                          ? mVideoDeviceState->mTrackSource->mTrack
-                         : nullptr,
-       videoDeviceMuted =
-           mVideoDeviceState ? mVideoDeviceState->mDeviceMuted : false](
+                         : nullptr](
           MozPromiseHolder<SourceListenerPromise>& aHolder) {
         if (audioDevice) {
           audioDevice->SetTrack(audioStream->AsSourceTrack(), principal);

@@ -4210,7 +4102,7 @@ SourceListener::InitializeAsync() {
         }

         if (audioDevice) {
-          nsresult rv = audioDeviceMuted ? NS_OK : audioDevice->Start();
+          nsresult rv = audioDevice->Start();
           if (rv == NS_ERROR_NOT_AVAILABLE) {
             PR_Sleep(200);
             rv = audioDevice->Start();

@@ -4234,7 +4126,7 @@ SourceListener::InitializeAsync() {
         }

         if (videoDevice) {
-          nsresult rv = videoDeviceMuted ? NS_OK : videoDevice->Start();
+          nsresult rv = videoDevice->Start();
           if (NS_FAILED(rv)) {
             if (audioDevice) {
               if (NS_WARN_IF(NS_FAILED(audioDevice->Stop()))) {

@@ -4383,80 +4275,6 @@ void SourceListener::GetSettingsFor(MediaTrack* aTrack,
   }
 }

-static bool SameGroupAsCurrentAudioOutput(const nsString& aGroupId) {
-  CubebDeviceEnumerator* enumerator = CubebDeviceEnumerator::GetInstance();
-  // Get the current graph's device info. This is always the
-  // default audio output device for now.
-  RefPtr<AudioDeviceInfo> outputDevice =
-      enumerator->DefaultDevice(CubebDeviceEnumerator::Side::OUTPUT);
-  return outputDevice && outputDevice->GroupID().Equals(aGroupId);
-}
-
-auto SourceListener::UpdateDevice(MediaTrack* aTrack, bool aOn)
-    -> RefPtr<DeviceOperationPromise> {
-  MOZ_ASSERT(NS_IsMainThread());
-  RefPtr<SourceListener> self = this;
-  DeviceState& state = GetDeviceStateFor(aTrack);
-  nsString groupId;
-  state.mDevice->GetRawGroupId(groupId);
-
-  return MediaManager::Dispatch<DeviceOperationPromise>(
-             __func__,
-             [self, device = state.mDevice, aOn,
-              groupId](MozPromiseHolder<DeviceOperationPromise>& h) {
-               if (device->mKind == dom::MediaDeviceKind::Audioinput && !aOn &&
-                   SameGroupAsCurrentAudioOutput(groupId)) {
-                 // Don't turn off the microphone of a device that is on the
-                 // same physical device as the output.
-                 //
-                 // Also don't take this branch when turning on, in case the
-                 // default audio output device has changed. The AudioInput
-                 // source start/stop are idempotent, so this works.
-                 LOG("Not turning device off, as it matches audio output (%s)",
-                     NS_ConvertUTF16toUTF8(groupId).get());
-                 h.Resolve(NS_OK, __func__);
-                 return;
-               }
-               LOG("Turning %s device (%s)", aOn ? "on" : "off",
-                   NS_ConvertUTF16toUTF8(groupId).get());
-               h.Resolve(aOn ? device->Start() : device->Stop(), __func__);
-             })
-      ->Then(
-          GetMainThreadSerialEventTarget(), __func__,
-          [self, this, &state, track = RefPtr<MediaTrack>(aTrack),
-           aOn](nsresult aResult) {
-            if (state.mStopped) {
-              // Device was stopped on main thread during the operation. Done.
-              return DeviceOperationPromise::CreateAndResolve(aResult,
-                                                              __func__);
-            }
-            LOG("SourceListener %p turning %s %s input device for track %p %s",
-                this, aOn ? "on" : "off",
-                &state == mAudioDeviceState.get() ? "audio" : "video",
-                track.get(), NS_SUCCEEDED(aResult) ? "succeeded" : "failed");
-
-            if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT) {
-              // This path handles errors from starting or stopping the device.
-              // NS_ERROR_ABORT are for cases where *we* aborted. They need
-              // graceful handling.
-              if (aOn) {
-                // Starting the device failed. Stopping the track here will make
-                // the MediaStreamTrack end after a pass through the
-                // MediaTrackGraph.
-                StopTrack(track);
-              } else {
-                // Stopping the device failed. This is odd, but not fatal.
-                MOZ_ASSERT_UNREACHABLE("The device should be stoppable");
-              }
-            }
-            return DeviceOperationPromise::CreateAndResolve(aResult, __func__);
-          },
-          []() {
-            MOZ_ASSERT_UNREACHABLE("Unexpected and unhandled reject");
-            return DeviceOperationPromise::CreateAndReject(false, __func__);
-          });
-}
-
 void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
   MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
   MOZ_ASSERT(Activated(), "No device to set enabled state for");

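The removed UpdateDevice() carries a subtle guard: when turning an audio input off, it first checks whether the input shares a group id, i.e. a physical device, with the current default audio output, and if so it skips the stop so that muting a headset microphone cannot tear down the duplex stream that also carries playback. The guard in isolation, with stand-ins for the cubeb enumerator types:

    #include <string>

    struct AudioDeviceInfoSketch {  // stand-in for AudioDeviceInfo
      std::string mGroupId;
    };

    // Stand-in for CubebDeviceEnumerator::DefaultDevice(Side::OUTPUT).
    static AudioDeviceInfoSketch* DefaultOutputDevice() {
      static AudioDeviceInfoSketch sDefault{"builtin-sound-card"};
      return &sDefault;
    }

    // Mirrors SameGroupAsCurrentAudioOutput() plus its call-site condition:
    // only audio inputs are affected, and only when turning *off*. Start and
    // stop are idempotent, so skipping the stop is safe.
    static bool ShouldSkipDeviceStop(bool aIsAudioInput, bool aTurningOn,
                                     const std::string& aInputGroupId) {
      if (!aIsAudioInput || aTurningOn) {
        return false;
      }
      AudioDeviceInfoSketch* output = DefaultOutputDevice();
      return output && output->mGroupId == aInputGroupId;
    }
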
@@ -4509,6 +4327,8 @@ void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
     timerPromise = state.mDisableTimer->WaitFor(delay, __func__);
   }

+  typedef MozPromise<nsresult, bool, /* IsExclusive = */ true>
+      DeviceOperationPromise;
   RefPtr<SourceListener> self = this;
   timerPromise
       ->Then(

@@ -4537,14 +4357,53 @@ void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
            if (mWindowListener) {
              mWindowListener->ChromeAffectingStateChanged();
            }
-           if (!state.mOffWhileDisabled || state.mDeviceMuted) {
+           if (!state.mOffWhileDisabled) {
              // If the feature to turn a device off while disabled is itself
-             // disabled, or the device is currently user agent muted, then
-             // we shortcut the device operation and tell the
+             // disabled we shortcut the device operation and tell the
              // ux-updating code that everything went fine.
              return DeviceOperationPromise::CreateAndResolve(NS_OK, __func__);
            }
-           return UpdateDevice(track, aEnable);
+
+           nsString inputDeviceGroupId;
+           state.mDevice->GetRawGroupId(inputDeviceGroupId);
+
+           return MediaManager::Dispatch<DeviceOperationPromise>(
+               __func__,
+               [self, device = state.mDevice, aEnable, inputDeviceGroupId](
+                   MozPromiseHolder<DeviceOperationPromise>& h) {
+                 // Only take this branch when muting, to avoid muting, in case
+                 // the default audio output device has changed and we need to
+                 // really call `Start` on the source. The AudioInput source
+                 // start/stop are idempotent, so this works.
+                 if (device->mKind == dom::MediaDeviceKind::Audioinput &&
+                     !aEnable) {
+                   // Don't turn off the microphone of a device that is on the
+                   // same physical device as the output.
+                   CubebDeviceEnumerator* enumerator =
+                       CubebDeviceEnumerator::GetInstance();
+                   // Get the current graph's device info. This is always the
+                   // default audio output device for now.
+                   RefPtr<AudioDeviceInfo> outputDevice =
+                       enumerator->DefaultDevice(
+                           CubebDeviceEnumerator::Side::OUTPUT);
+                   if (outputDevice &&
+                       outputDevice->GroupID().Equals(inputDeviceGroupId)) {
+                     LOG("Device group id match when %s, "
+                         "not turning the input device off (%s)",
+                         aEnable ? "unmuting" : "muting",
+                         NS_ConvertUTF16toUTF8(outputDevice->GroupID()).get());
+                     h.Resolve(NS_OK, __func__);
+                     return;
+                   }
+                 }
+
+                 LOG("Device group id don't match when %s, "
+                     "not turning the audio input device off (%s)",
+                     aEnable ? "unmuting" : "muting",
+                     NS_ConvertUTF16toUTF8(inputDeviceGroupId).get());
+                 h.Resolve(aEnable ? device->Start() : device->Stop(),
+                           __func__);
+               });
          },
          []() {
            // Timer was canceled by us. We signal this with NS_ERROR_ABORT.

@@ -4566,10 +4425,28 @@ void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
              return;
            }

-           if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT && !aEnable) {
-             // To keep our internal state sane in this case, we disallow
-             // future stops due to disable.
-             state.mOffWhileDisabled = false;
+           LOG("SourceListener %p %s %s track for track %p %s", this,
+               aEnable ? "enabling" : "disabling",
+               &state == mAudioDeviceState.get() ? "audio" : "video",
+               track.get(), NS_SUCCEEDED(aResult) ? "succeeded" : "failed");
+
+           if (NS_FAILED(aResult) && aResult != NS_ERROR_ABORT) {
+             // This path handles errors from starting or stopping the device.
+             // NS_ERROR_ABORT are for cases where *we* aborted. They need
+             // graceful handling.
+             if (aEnable) {
+               // Starting the device failed. Stopping the track here will make
+               // the MediaStreamTrack end after a pass through the
+               // MediaTrackGraph.
+               StopTrack(track);
+             } else {
+               // Stopping the device failed. This is odd, but not fatal.
+               MOZ_ASSERT_UNREACHABLE("The device should be stoppable");
+
+               // To keep our internal state sane in this case, we disallow
+               // future stops due to disable.
+               state.mOffWhileDisabled = false;
+             }
              return;
            }

@@ -4579,58 +4456,22 @@ void SourceListener::SetEnabledFor(MediaTrack* aTrack, bool aEnable) {
            // update the device state if the track state changed in the
            // meantime.
 
-           if (state.mTrackEnabled != state.mDeviceEnabled) {
-             // Track state changed during this operation. We'll start over.
-             SetEnabledFor(track, state.mTrackEnabled);
+           if (state.mTrackEnabled == state.mDeviceEnabled) {
+             // Intended state is same as device's current state.
+             // Nothing more to do.
+             return;
+           }
+
+           // Track state changed during this operation. We'll start over.
+           if (state.mTrackEnabled) {
+             SetEnabledFor(track, true);
+           } else {
+             SetEnabledFor(track, false);
           }
         },
         []() { MOZ_ASSERT_UNREACHABLE("Unexpected and unhandled reject"); });
 }
 
-void SourceListener::SetMutedFor(LocalTrackSource* aTrackSource, bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
-  MOZ_ASSERT(Activated(), "No device to set muted state for");
-
-  MediaTrack* track = aTrackSource->mTrack;
-  DeviceState& state = GetDeviceStateFor(track);
-
-  LOG("SourceListener %p %s %s track for track %p", this,
-      aMute ? "muting" : "unmuting",
-      &state == mAudioDeviceState.get() ? "audio" : "video", track);
-
-  if (state.mStopped) {
-    // Device terminally stopped. Updating device state is pointless.
-    return;
-  }
-
-  if (state.mDeviceMuted == aMute) {
-    // Device is already in the desired state.
-    return;
-  }
-
-  LOG("SourceListener %p %s %s track for track %p - starting device operation",
-      this, aMute ? "muting" : "unmuting",
-      &state == mAudioDeviceState.get() ? "audio" : "video", track);
-
-  state.mDeviceMuted = aMute;
-
-  if (mWindowListener) {
-    mWindowListener->ChromeAffectingStateChanged();
-  }
-  // Update trackSource to fire mute/unmute events on all its tracks
-  if (aMute) {
-    aTrackSource->Mute();
-  } else {
-    aTrackSource->Unmute();
-  }
-  if (state.mOffWhileDisabled && !state.mDeviceEnabled &&
-      state.mDevice->mKind == dom::MediaDeviceKind::Videoinput) {
-    // Camera is already off. TODO: Revisit once we support UA-muting mics.
-    return;
-  }
-  UpdateDevice(track, !aMute);
-}
-
 void SourceListener::StopSharing() {
   MOZ_ASSERT(NS_IsMainThread());
 
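The restored tail of SetEnabledFor() re-checks, once the async device operation settles, whether the track's intended state still matches what the device is now doing, and dispatches another operation if not. A minimal sketch of that reconcile-and-start-over shape, with simplified stand-in types rather than the real Gecko classes:

#include <iostream>

// Simplified stand-in state; the real code tracks this per device.
struct DeviceState {
  bool mTrackEnabled = true;   // what the track wants
  bool mDeviceEnabled = false; // what the hardware is doing
};

// Sketch of the "start over on race" shape from the restored code: after an
// async enable/disable settles, re-dispatch if the intent changed meanwhile.
void SetEnabledFor(DeviceState& state, bool aEnable) {
  state.mDeviceEnabled = aEnable;  // pretend the async op completed
  if (state.mTrackEnabled == state.mDeviceEnabled) {
    return;  // intended state matches the device; nothing more to do
  }
  // Track state changed during the operation; start over.
  SetEnabledFor(state, state.mTrackEnabled);
}

int main() {
  DeviceState state;
  SetEnabledFor(state, false);  // a disable raced with mTrackEnabled=true
  std::cout << std::boolalpha << state.mDeviceEnabled << "\n";  // true
}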
@@ -4658,22 +4499,6 @@ void SourceListener::StopSharing() {
   }
 }
 
-void SourceListener::MuteOrUnmuteCamera(bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread());
-
-  if (mStopped) {
-    return;
-  }
-
-  MOZ_RELEASE_ASSERT(mWindowListener);
-  LOG("SourceListener %p MuteOrUnmuteCamera", this);
-
-  if (mVideoDeviceState && (mVideoDeviceState->mDevice->GetMediaSource() ==
-                            MediaSourceEnum::Camera)) {
-    SetMutedFor(mVideoDeviceState->mTrackSource, aMute);
-  }
-}
-
 bool SourceListener::CapturingVideo() const {
   MOZ_ASSERT(NS_IsMainThread());
   return Activated() && mVideoDeviceState && !mVideoDeviceState->mStopped &&
@@ -4715,9 +4540,9 @@ CaptureState SourceListener::CapturingSource(MediaSourceEnum aSource) const {
     return CaptureState::Off;
   }
 
-  // Source is a match and is active and unmuted
+  // Source is a match and is active
 
-  if (state.mDeviceEnabled && !state.mDeviceMuted) {
+  if (state.mDeviceEnabled) {
     return CaptureState::Enabled;
   }
 
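Dropping the mDeviceMuted term means an enabled device always reports CaptureState::Enabled again. A hedged sketch of the pre-backout mapping, with assumed enum values and simplified state:

#include <iostream>

enum class CaptureState { Off, Enabled, Disabled };  // assumed values

struct DeviceState {
  bool mDeviceEnabled = true;
  bool mDeviceMuted = false;  // the field the backout removes
};

// Pre-backout shape: a muted device reports Disabled even when enabled.
CaptureState CapturingSource(const DeviceState& state) {
  if (state.mDeviceEnabled && !state.mDeviceMuted) {
    return CaptureState::Enabled;
  }
  return CaptureState::Disabled;
}

int main() {
  DeviceState s;
  s.mDeviceMuted = true;
  std::cout << (CapturingSource(s) == CaptureState::Disabled) << "\n";  // 1
}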
@@ -4823,21 +4648,6 @@ void GetUserMediaWindowListener::StopRawID(const nsString& removedDeviceID) {
   }
 }
 
-void GetUserMediaWindowListener::MuteOrUnmuteCameras(bool aMute) {
-  MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread");
-
-  if (mCamerasAreMuted == aMute) {
-    return;
-  }
-  mCamerasAreMuted = aMute;
-
-  for (auto& source : mActiveListeners) {
-    if (source->GetVideoDevice()) {
-      source->MuteOrUnmuteCamera(aMute);
-    }
-  }
-}
-
 void GetUserMediaWindowListener::ChromeAffectingStateChanged() {
   MOZ_ASSERT(NS_IsMainThread());
 
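The removed MuteOrUnmuteCameras() is a plain fan-out: one window-level flag pushed down to each active source that owns a video device, with an early return when the state is unchanged. A self-contained sketch of that shape (stand-in types, not the real listener classes):

#include <iostream>
#include <vector>

// Stand-ins for the removed fan-out: one window-level flag pushed to every
// active capture source that has a video device.
struct Source {
  bool mHasVideoDevice = true;
  void MuteOrUnmuteCamera(bool aMute) {
    std::cout << (aMute ? "muted" : "unmuted") << " a camera\n";
  }
};

struct WindowListener {
  bool mCamerasAreMuted = false;
  std::vector<Source> mActiveListeners;

  void MuteOrUnmuteCameras(bool aMute) {
    if (mCamerasAreMuted == aMute) return;  // already in that state
    mCamerasAreMuted = aMute;
    for (auto& source : mActiveListeners) {
      if (source.mHasVideoDevice) source.MuteOrUnmuteCamera(aMute);
    }
  }
};

int main() {
  WindowListener w;
  w.mActiveListeners.push_back(Source{});
  w.mActiveListeners.push_back(Source{});
  w.MuteOrUnmuteCameras(true);  // mutes both sources
  w.MuteOrUnmuteCameras(true);  // no-op: state unchanged
}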
@@ -254,7 +254,6 @@ class MediaManager final : public nsIMediaManagerService, public nsIObserver {
                         const nsString& aDeviceId);
 
   void OnNavigation(uint64_t aWindowID);
-  void OnCameraMute(bool aMute);
   bool IsActivelyCapturingOrHasAPermission(uint64_t aWindowId);
 
   MediaEventSource<void>& DeviceListChangeEvent() {
@@ -342,7 +341,6 @@ class MediaManager final : public nsIMediaManagerService, public nsIObserver {
   nsRefPtrHashtable<nsStringHashKey, GetUserMediaTask> mActiveCallbacks;
   nsClassHashtable<nsUint64HashKey, nsTArray<nsString>> mCallIds;
   nsTArray<RefPtr<dom::GetUserMediaRequest>> mPendingGUMRequest;
-  bool mCamerasMuted = false;
 
   // Always exists
   const RefPtr<TaskQueue> mMediaThread;
@@ -295,8 +295,8 @@ void MediaStreamTrack::SetEnabled(bool aEnabled) {
     return;
   }
 
-  mTrack->SetDisabledTrackMode(mEnabled ? DisabledTrackMode::ENABLED
+  mTrack->SetEnabled(mEnabled ? DisabledTrackMode::ENABLED
                               : DisabledTrackMode::SILENCE_BLACK);
   NotifyEnabledChanged();
 }
 
@@ -459,11 +459,6 @@ void MediaStreamTrack::MutedChanged(bool aNewState) {
       ("MediaStreamTrack %p became %s", this, aNewState ? "muted" : "unmuted"));
 
   mMuted = aNewState;
 
-  if (Ended()) {
-    return;
-  }
-
   nsString eventName = aNewState ? u"mute"_ns : u"unmute"_ns;
   DispatchTrustedEvent(eventName);
 }
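The removed guard kept ended tracks from firing mute/unmute events; after the backout, MutedChanged() dispatches unconditionally. A sketch of the pre-backout flow, with stand-in types:

#include <iostream>
#include <string>

// Stand-in for the track's muted-state change handler. The backout drops the
// "don't fire events on an ended track" guard shown here.
struct Track {
  bool mMuted = false;
  bool mEnded = false;

  void DispatchTrustedEvent(const std::string& name) {
    std::cout << "event: " << name << "\n";
  }

  void MutedChanged(bool aNewState) {
    mMuted = aNewState;
    if (mEnded) {
      return;  // pre-backout behavior: ended tracks fire no mute events
    }
    DispatchTrustedEvent(aNewState ? "mute" : "unmute");
  }
};

int main() {
  Track t;
  t.MutedChanged(true);   // prints "event: mute"
  t.mEnded = true;
  t.MutedChanged(false);  // guard suppresses the event
}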
@@ -589,7 +589,6 @@ class MediaStreamTrack : public DOMEventTargetHelper, public SupportsWeakPtr {
 
   /**
    * Sets this track's muted state without raising any events.
-   * Only really set by cloning. See MutedChanged for runtime changes.
    */
   void SetMuted(bool aMuted) { mMuted = aMuted; }
 
@@ -864,8 +864,7 @@ void MediaTrackGraphImpl::ProcessInputData() {
       mInputDeviceUsers.GetValue(mInputDeviceID);
   MOZ_ASSERT(listeners);
   for (auto& listener : *listeners) {
-    listener->NotifyInputData(this, mInputData, mInputFrames, GraphRate(),
-                              mInputChannelCount);
+    listener->NotifyInputData(this, mInputData, mInputFrames, GraphRate(), mInputChannelCount);
   }
 
   mInputData = nullptr;
@@ -2181,7 +2180,7 @@ void MediaTrack::AddListenerImpl(
   if (mNotifiedEnded) {
     mTrackListeners.LastElement()->NotifyEnded(Graph());
   }
-  if (CombinedDisabledMode() == DisabledTrackMode::SILENCE_BLACK) {
+  if (mDisabledMode == DisabledTrackMode::SILENCE_BLACK) {
     mTrackListeners.LastElement()->NotifyEnabledStateChanged(Graph(), false);
   }
 }
@@ -2316,22 +2315,31 @@ void MediaTrack::RunAfterPendingUpdates(
   graph->AppendMessage(MakeUnique<Message>(this, runnable.forget()));
 }
 
-void MediaTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
-  MOZ_DIAGNOSTIC_ASSERT(
-      aMode == DisabledTrackMode::ENABLED ||
-          mDisabledMode == DisabledTrackMode::ENABLED,
-      "Changing disabled track mode for a track is not allowed");
-  DisabledTrackMode oldMode = CombinedDisabledMode();
-  mDisabledMode = aMode;
-  NotifyIfDisabledModeChangedFrom(oldMode);
+void MediaTrack::SetEnabledImpl(DisabledTrackMode aMode) {
+  if (aMode == DisabledTrackMode::ENABLED) {
+    mDisabledMode = DisabledTrackMode::ENABLED;
+    for (const auto& l : mTrackListeners) {
+      l->NotifyEnabledStateChanged(Graph(), true);
+    }
+  } else {
+    MOZ_DIAGNOSTIC_ASSERT(
+        mDisabledMode == DisabledTrackMode::ENABLED,
+        "Changing disabled track mode for a track is not allowed");
+    mDisabledMode = aMode;
+    if (aMode == DisabledTrackMode::SILENCE_BLACK) {
+      for (const auto& l : mTrackListeners) {
+        l->NotifyEnabledStateChanged(Graph(), false);
+      }
+    }
+  }
 }
 
-void MediaTrack::SetDisabledTrackMode(DisabledTrackMode aMode) {
+void MediaTrack::SetEnabled(DisabledTrackMode aMode) {
   class Message : public ControlMessage {
    public:
    Message(MediaTrack* aTrack, DisabledTrackMode aMode)
        : ControlMessage(aTrack), mMode(aMode) {}
-    void Run() override { mTrack->SetDisabledTrackModeImpl(mMode); }
+    void Run() override { mTrack->SetEnabledImpl(mMode); }
     DisabledTrackMode mMode;
   };
   if (mMainThreadDestroyed) {
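On both sides of this rename, the public setter hops to the graph thread by queueing a small ControlMessage whose Run() applies the mode, exactly as the Message class in the hunk above shows. A self-contained sketch of that queue-and-run pattern, with a std::function queue standing in for the graph's real message queue:

#include <functional>
#include <iostream>
#include <queue>

enum class DisabledTrackMode { ENABLED, SILENCE_BLACK };

struct Track {
  DisabledTrackMode mDisabledMode = DisabledTrackMode::ENABLED;
  void SetEnabledImpl(DisabledTrackMode aMode) { mDisabledMode = aMode; }
};

// Stand-in for the graph's control message queue: work posted from the main
// thread, run later on the graph thread.
std::queue<std::function<void()>> gGraphQueue;

void SetEnabled(Track& track, DisabledTrackMode aMode) {
  gGraphQueue.push([&track, aMode] { track.SetEnabledImpl(aMode); });
}

int main() {
  Track t;
  SetEnabled(t, DisabledTrackMode::SILENCE_BLACK);
  while (!gGraphQueue.empty()) {  // pretend we are the graph thread
    gGraphQueue.front()();
    gGraphQueue.pop();
  }
  std::cout << (t.mDisabledMode == DisabledTrackMode::SILENCE_BLACK) << "\n";
}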
@@ -2415,24 +2423,6 @@ void MediaTrack::AdvanceTimeVaryingValuesToCurrentTime(GraphTime aCurrentTime,
     mSegment->ForgetUpTo(mForgottenTime);
   }
 }
 
-void MediaTrack::NotifyIfDisabledModeChangedFrom(DisabledTrackMode aOldMode) {
-  DisabledTrackMode mode = CombinedDisabledMode();
-  if (aOldMode == mode) {
-    return;
-  }
-
-  for (const auto& listener : mTrackListeners) {
-    listener->NotifyEnabledStateChanged(
-        Graph(), mode != DisabledTrackMode::SILENCE_BLACK);
-  }
-
-  for (const auto& c : mConsumers) {
-    if (c->GetDestination()) {
-      c->GetDestination()->OnInputDisabledModeChanged(mode);
-    }
-  }
-}
-
 SourceMediaTrack::SourceMediaTrack(MediaSegment::Type aType,
                                    TrackRate aSampleRate)
     : MediaTrack(aSampleRate, aType,
@@ -2766,10 +2756,6 @@ void SourceMediaTrack::AddDirectListenerImpl(
     listener->NotifyDirectListenerInstalled(
         DirectMediaTrackListener::InstallationResult::SUCCESS);
 
-    if (mDisabledMode != DisabledTrackMode::ENABLED) {
-      listener->IncreaseDisabled(mDisabledMode);
-    }
-
     if (mEnded) {
       return;
     }
@@ -2816,9 +2802,6 @@ void SourceMediaTrack::RemoveDirectListenerImpl(
   for (int32_t i = mDirectTrackListeners.Length() - 1; i >= 0; --i) {
     const RefPtr<DirectMediaTrackListener>& l = mDirectTrackListeners[i];
     if (l == aListener) {
-      if (mDisabledMode != DisabledTrackMode::ENABLED) {
-        aListener->DecreaseDisabled(mDisabledMode);
-      }
       aListener->NotifyDirectListenerUninstalled();
       mDirectTrackListeners.RemoveElementAt(i);
     }
@@ -2840,7 +2823,7 @@ void SourceMediaTrack::End() {
   }
 }
 
-void SourceMediaTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
+void SourceMediaTrack::SetEnabledImpl(DisabledTrackMode aMode) {
   {
     MutexAutoLock lock(mMutex);
     for (const auto& l : mDirectTrackListeners) {
@@ -2859,7 +2842,7 @@ void SourceMediaTrack::SetDisabledTrackModeImpl(DisabledTrackMode aMode) {
       }
     }
   }
-  MediaTrack::SetDisabledTrackModeImpl(aMode);
+  MediaTrack::SetEnabledImpl(aMode);
 }
 
 void SourceMediaTrack::RemoveAllDirectListenersImpl() {
@@ -329,7 +329,7 @@ class MediaTrack : public mozilla::LinkedListElement<MediaTrack> {
 
   // A disabled track has video replaced by black, and audio replaced by
   // silence.
-  void SetDisabledTrackMode(DisabledTrackMode aMode);
+  void SetEnabled(DisabledTrackMode aMode);
 
   // End event will be notified by calling methods of aListener. It is the
   // responsibility of the caller to remove aListener before it is destroyed.
@@ -411,7 +411,7 @@ class MediaTrack : public mozilla::LinkedListElement<MediaTrack> {
   virtual void AddDirectListenerImpl(
       already_AddRefed<DirectMediaTrackListener> aListener);
   virtual void RemoveDirectListenerImpl(DirectMediaTrackListener* aListener);
-  virtual void SetDisabledTrackModeImpl(DisabledTrackMode aMode);
+  virtual void SetEnabledImpl(DisabledTrackMode aMode);
 
   void AddConsumer(MediaInputPort* aPort) { mConsumers.AppendElement(aPort); }
   void RemoveConsumer(MediaInputPort* aPort) {
@@ -420,12 +420,6 @@ class MediaTrack : public mozilla::LinkedListElement<MediaTrack> {
   GraphTime StartTime() const { return mStartTime; }
   bool Ended() const { return mEnded; }
 
-  // The DisabledTrackMode after combining the explicit mode and that of the
-  // input, if any.
-  virtual DisabledTrackMode CombinedDisabledMode() const {
-    return mDisabledMode;
-  }
-
   template <class SegmentType>
   SegmentType* GetData() const {
     if (!mSegment) {
@@ -532,10 +526,6 @@ class MediaTrack : public mozilla::LinkedListElement<MediaTrack> {
     return true;
   }
 
-  // Notifies listeners and consumers of the change in disabled mode when the
-  // current combined mode is different from aMode.
-  void NotifyIfDisabledModeChangedFrom(DisabledTrackMode aOldMode);
-
   // This state is all initialized on the main thread but
   // otherwise modified only on the media graph thread.
 
@@ -681,7 +671,7 @@ class SourceMediaTrack : public MediaTrack {
 
   // Overriding allows us to hold the mMutex lock while changing the track
   // enable status
-  void SetDisabledTrackModeImpl(DisabledTrackMode aMode) override;
+  void SetEnabledImpl(DisabledTrackMode aMode) override;
 
   // Overriding allows us to ensure mMutex is locked while changing the track
   // enable status
@@ -971,9 +961,6 @@ class ProcessedMediaTrack : public MediaTrack {
   // true for echo loops, only for muted cycles.
   bool InMutedCycle() const { return mCycleMarker; }
 
-  // Used by ForwardedInputTrack to propagate the disabled mode along the graph.
-  virtual void OnInputDisabledModeChanged(DisabledTrackMode aMode) {}
-
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override {
     size_t amount = MediaTrack::SizeOfExcludingThis(aMallocSizeOf);
     // Not owned:
@@ -139,22 +139,14 @@ class VideoFrameConverter {
           ("VideoFrameConverter Track is now %s",
            aTrackEnabled ? "enabled" : "disabled"));
       mTrackEnabled = aTrackEnabled;
-      if (!aTrackEnabled) {
-        // After disabling we immediately send a frame as black, so it can
-        // be seen quickly, even if no frames are flowing.
-        if (mLastFrameConverted) {
-          // This track has already seen frames so we re-send the last one
-          // as black.
-          ProcessVideoFrame(
-              FrameToProcess{nullptr, TimeStamp::Now(),
-                             gfx::IntSize(mLastFrameConverted->width(),
-                                          mLastFrameConverted->height()),
-                             true});
-        } else {
-          // This track has not yet seen any frame. We make one up.
-          ProcessVideoFrame(FrameToProcess{nullptr, TimeStamp::Now(),
-                                           gfx::IntSize(640, 480), true});
-        }
+      if (!aTrackEnabled && mLastFrameConverted) {
+        // After disabling, we re-send the last frame as black in case the
+        // source had already stopped and no frame is coming soon.
+        ProcessVideoFrame(
+            FrameToProcess{nullptr, TimeStamp::Now(),
+                           gfx::IntSize(mLastFrameConverted->width(),
+                                        mLastFrameConverted->height()),
+                           true});
       }
     }));
 MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
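Post-backout, SetTrackEnabled() only re-sends a black frame when the converter has already seen one (mLastFrameConverted), reusing its dimensions; the made-up 640x480 frame for never-started tracks is gone. A minimal stand-in sketch of that behavior:

#include <iostream>
#include <memory>

// Minimal stand-ins for the converter's frame bookkeeping.
struct Frame {
  int width;
  int height;
};

struct Converter {
  std::unique_ptr<Frame> mLastFrameConverted;

  void ProcessVideoFrame(int w, int h, bool forceBlack) {
    std::cout << (forceBlack ? "black " : "real ") << w << "x" << h << "\n";
  }

  // Post-backout shape: only re-send black if we have a last frame to size it
  // from; a track that never produced a frame sends nothing.
  void SetTrackEnabled(bool aTrackEnabled) {
    if (!aTrackEnabled && mLastFrameConverted) {
      ProcessVideoFrame(mLastFrameConverted->width,
                        mLastFrameConverted->height, /*forceBlack=*/true);
    }
  }
};

int main() {
  Converter c;
  c.SetTrackEnabled(false);  // no frame seen yet: nothing sent
  c.mLastFrameConverted = std::make_unique<Frame>(Frame{640, 480});
  c.SetTrackEnabled(false);  // prints "black 640x480"
}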
@@ -185,29 +185,12 @@ class VideoOutput : public DirectMediaTrackListener {
                                  bool aEnabled) override {
     MutexAutoLock lock(mMutex);
     mEnabled = aEnabled;
-    DropPastFrames();
-    if (!mEnabled || mFrames.Length() > 1) {
-      // Re-send frames when disabling, as new frames may not arrive. When
-      // enabling we keep them black until new frames arrive, or re-send if we
-      // already have frames in the future. If we're disabling and there are no
-      // frames available yet, we invent one. Unfortunately with a hardcoded
-      // size.
-      //
-      // Since mEnabled will affect whether
-      // frames are real, or black, we assign new FrameIDs whenever we re-send
-      // frames after an mEnabled change.
-      for (auto& idChunkPair : mFrames) {
-        idChunkPair.first = mVideoFrameContainer->NewFrameID();
-      }
-      if (mFrames.IsEmpty()) {
-        VideoSegment v;
-        v.AppendFrame(nullptr, gfx::IntSize(640, 480), PRINCIPAL_HANDLE_NONE,
-                      true, TimeStamp::Now());
-        mFrames.AppendElement(std::make_pair(mVideoFrameContainer->NewFrameID(),
-                                             *v.GetLastChunk()));
-      }
-      SendFramesEnsureLocked();
-    }
+    // Since mEnabled will affect whether frames are real, or black, we assign
+    // new FrameIDs whenever this changes.
+    for (auto& idChunkPair : mFrames) {
+      idChunkPair.first = mVideoFrameContainer->NewFrameID();
+    }
+    SendFramesEnsureLocked();
   }
 
   Mutex mMutex;
@@ -1279,46 +1279,6 @@ TEST(VP8VideoTrackEncoder, DisableBetweenFrames)
   EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration);
 }
 
-// Test that an encoding which is disabled before the first frame becomes black
-// immediately.
-TEST(VP8VideoTrackEncoder, DisableBeforeFirstFrame)
-{
-  TestVP8TrackEncoder encoder;
-  YUVBufferGenerator generator;
-  generator.Init(mozilla::gfx::IntSize(640, 480));
-  nsTArray<RefPtr<EncodedFrame>> frames;
-  TimeStamp now = TimeStamp::Now();
-
-  // Disable the track at t=0.
-  // Pass a frame in at t=50ms.
-  // Enable the track at t=100ms.
-  // Stop encoding at t=200ms.
-  // Should yield 2 frames, 1 black [0, 100); 1 real [100, 200).
-
-  VideoSegment segment;
-  segment.AppendFrame(generator.GenerateI420Image(), generator.GetSize(),
-                      PRINCIPAL_HANDLE_NONE, false,
-                      now + TimeDuration::FromMilliseconds(50));
-
-  encoder.SetStartOffset(now);
-  encoder.Disable(now);
-  encoder.AppendVideoSegment(std::move(segment));
-
-  encoder.Enable(now + TimeDuration::FromMilliseconds(100));
-  encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200));
-  encoder.NotifyEndOfStream();
-  ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames)));
-  EXPECT_TRUE(encoder.IsEncodingComplete());
-
-  ASSERT_EQ(2UL, frames.Length());
-
-  // [0, 100ms)
-  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration);
-
-  // [100ms, 200ms)
-  EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->mDuration);
-}
-
 // Test that an encoding which is enabled on a frame timestamp encodes
 // frames as expected.
 TEST(VP8VideoTrackEncoder, EnableOnFrameTime)
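The surviving encoder tests assert frame durations in microseconds; PR_USEC_PER_SEC / 1000 * 100UL is simply 100 ms expressed in microseconds. A one-line check of that arithmetic:

#include <cstdint>

// PR_USEC_PER_SEC is NSPR's microseconds-per-second constant.
constexpr uint64_t PR_USEC_PER_SEC = 1000000;

// 1,000,000 / 1000 = 1,000 us per ms; * 100 = 100 ms in microseconds.
static_assert(PR_USEC_PER_SEC / 1000 * 100 == 100000,
              "the tests' expected frame duration is 100 ms");

int main() { return 0; }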
@@ -547,7 +547,7 @@ AudioDestinationNode::WindowSuspendChanged(nsSuspendedTypes aSuspend) {
 
   DisabledTrackMode disabledMode =
       suspended ? DisabledTrackMode::SILENCE_BLACK : DisabledTrackMode::ENABLED;
-  mTrack->SetDisabledTrackMode(disabledMode);
+  mTrack->SetEnabled(disabledMode);
 
   AudioChannelService::AudibleState audible =
       aSuspend == nsISuspendedTypes::NONE_SUSPENDED
@@ -724,11 +724,11 @@ class MediaPipelineTransmit::PipelineListener
         ->SendVideoFrame(aVideoFrame);
   }
 
+  void SetTrackEnabled(MediaStreamTrack* aTrack, bool aEnabled);
+
   // Implement MediaTrackListener
   void NotifyQueuedChanges(MediaTrackGraph* aGraph, TrackTime aOffset,
                            const MediaSegment& aQueuedMedia) override;
-  void NotifyEnabledStateChanged(MediaTrackGraph* aGraph,
-                                 bool aEnabled) override;
 
   // Implement DirectMediaTrackListener
   void NotifyRealtimeTrackData(MediaTrackGraph* aGraph, TrackTime aOffset,
@@ -753,6 +753,29 @@ class MediaPipelineTransmit::PipelineListener
   bool mDirectConnect;
 };
 
+// MediaStreamTrackConsumer inherits from SupportsWeakPtr, which is
+// main-thread-only.
+class MediaPipelineTransmit::PipelineListenerTrackConsumer
+    : public MediaStreamTrackConsumer {
+  virtual ~PipelineListenerTrackConsumer() { MOZ_ASSERT(NS_IsMainThread()); }
+
+  const RefPtr<PipelineListener> mListener;
+
+ public:
+  NS_INLINE_DECL_REFCOUNTING(PipelineListenerTrackConsumer)
+
+  explicit PipelineListenerTrackConsumer(RefPtr<PipelineListener> aListener)
+      : mListener(std::move(aListener)) {
+    MOZ_ASSERT(NS_IsMainThread());
+  }
+
+  // Implement MediaStreamTrackConsumer
+  void NotifyEnabledChanged(MediaStreamTrack* aTrack, bool aEnabled) override {
+    MOZ_ASSERT(NS_IsMainThread());
+    mListener->SetTrackEnabled(aTrack, aEnabled);
+  }
+};
+
 // Implements VideoConverterListener for MediaPipeline.
 //
 // We pass converted frames on to MediaPipelineTransmit::PipelineListener
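The added PipelineListenerTrackConsumer is a thin main-thread adapter: the DOM track notifies it of enabled-state changes, and it forwards them to the pipeline's listener. A self-contained sketch of the adapter wiring, with simplified stand-ins for the Gecko types:

#include <iostream>
#include <memory>
#include <vector>

// Stand-ins for the DOM track and the pipeline listener.
struct PipelineListener {
  void SetTrackEnabled(bool aEnabled) {
    std::cout << "pipeline sees enabled=" << aEnabled << "\n";
  }
};

struct TrackConsumer {  // plays the role of MediaStreamTrackConsumer
  virtual ~TrackConsumer() = default;
  virtual void NotifyEnabledChanged(bool aEnabled) = 0;
};

struct PipelineListenerTrackConsumer : TrackConsumer {
  std::shared_ptr<PipelineListener> mListener;
  explicit PipelineListenerTrackConsumer(std::shared_ptr<PipelineListener> l)
      : mListener(std::move(l)) {}
  void NotifyEnabledChanged(bool aEnabled) override {
    mListener->SetTrackEnabled(aEnabled);  // forward, as in the diff
  }
};

struct MediaStreamTrack {
  std::vector<TrackConsumer*> mConsumers;
  void AddConsumer(TrackConsumer* c) { mConsumers.push_back(c); }
  void SetEnabled(bool aEnabled) {
    for (auto* c : mConsumers) c->NotifyEnabledChanged(aEnabled);
  }
};

int main() {
  auto listener = std::make_shared<PipelineListener>();
  PipelineListenerTrackConsumer consumer(listener);
  MediaStreamTrack track;
  track.AddConsumer(&consumer);
  track.SetEnabled(false);  // prints "pipeline sees enabled=0"
}

This replaces the graph-thread NotifyEnabledStateChanged() callback with a main-thread notification path, which is why the later hunks add AddConsumer/RemoveConsumer calls around mDomTrack.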
@@ -799,6 +822,10 @@ MediaPipelineTransmit::MediaPipelineTransmit(
                     std::move(aConduit)),
       mIsVideo(aIsVideo),
       mListener(new PipelineListener(mConduit)),
+      mTrackConsumer(
+          MakeAndAddRef<nsMainThreadPtrHolder<PipelineListenerTrackConsumer>>(
+              "MediaPipelineTransmit::mTrackConsumer",
+              MakeAndAddRef<PipelineListenerTrackConsumer>(mListener))),
       mFeeder(aIsVideo ? MakeAndAddRef<VideoFrameFeeder>(mListener)
                        : nullptr),  // For video we send frames to an
                                     // async VideoFrameConverter that
@@ -908,10 +935,10 @@ void MediaPipelineTransmit::Start() {
 
   mSendTrack->Resume();
 
-  mSendTrack->AddListener(mListener);
   if (mSendTrack->mType == MediaSegment::VIDEO) {
     mSendTrack->AddDirectListener(mListener);
   }
+  mSendTrack->AddListener(mListener);
 }
 
 bool MediaPipelineTransmit::IsVideo() const { return mIsVideo; }
@@ -951,7 +978,10 @@ void MediaPipelineTransmit::UpdateSinkIdentity_m(
 void MediaPipelineTransmit::DetachMedia() {
   ASSERT_ON_THREAD(mMainThread);
   MOZ_ASSERT(!mTransmitting);
-  mDomTrack = nullptr;
+  if (mDomTrack) {
+    mDomTrack->RemoveConsumer(mTrackConsumer);
+    mDomTrack = nullptr;
+  }
   if (mSendPort) {
     mSendPort->Destroy();
     mSendPort = nullptr;
@@ -1006,6 +1036,9 @@ nsresult MediaPipelineTransmit::SetTrack(RefPtr<MediaStreamTrack> aDomTrack) {
         (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video")));
   }
 
+  if (mDomTrack) {
+    mDomTrack->RemoveConsumer(mTrackConsumer);
+  }
   if (mSendPort) {
     mSendPort->Destroy();
     mSendPort = nullptr;
@@ -1043,6 +1076,10 @@ nsresult MediaPipelineTransmit::SetTrack(RefPtr<MediaStreamTrack> aDomTrack) {
       }
       mSendPort = mSendTrack->AllocateInputPort(mDomTrack->GetTrack());
     }
+    mDomTrack->AddConsumer(mTrackConsumer);
+    if (mConverter) {
+      mConverter->SetTrackEnabled(mDomTrack->Enabled());
+    }
   }
 
   return NS_OK;
@@ -1173,11 +1210,13 @@ void MediaPipelineTransmit::PipelineListener::NotifyQueuedChanges(
     NewData(aQueuedMedia, rate);
   }
 
-void MediaPipelineTransmit::PipelineListener::NotifyEnabledStateChanged(
-    MediaTrackGraph* aGraph, bool aEnabled) {
+void MediaPipelineTransmit::PipelineListener::SetTrackEnabled(
+    MediaStreamTrack* aTrack, bool aEnabled) {
+  MOZ_ASSERT(NS_IsMainThread());
   if (mConduit->type() != MediaSessionConduit::VIDEO) {
     return;
   }
 
   MOZ_ASSERT(mConverter);
   mConverter->SetTrackEnabled(aEnabled);
 }
@@ -310,6 +310,7 @@ class MediaPipelineTransmit : public MediaPipeline {
 
   // Separate classes to allow ref counting
   class PipelineListener;
+  class PipelineListenerTrackConsumer;
   class VideoFrameFeeder;
 
  protected:
@@ -322,6 +323,9 @@ class MediaPipelineTransmit : public MediaPipeline {
 
   const bool mIsVideo;
   const RefPtr<PipelineListener> mListener;
+  // Listens for changes in enabled state on the attached MediaStreamTrack, and
+  // notifies mListener.
+  const nsMainThreadPtrHandle<PipelineListenerTrackConsumer> mTrackConsumer;
   const RefPtr<VideoFrameFeeder> mFeeder;
   RefPtr<AudioProxyThread> mAudioProcessing;
   RefPtr<VideoFrameConverter> mConverter;
@@ -7405,13 +7405,6 @@
   value: @IS_NOT_ANDROID@
   mirror: always
 
-# Turn off any cameras (but not mics) while in the background. This is desirable
-# on mobile.
-- name: media.getusermedia.camera.background.mute.enabled
-  type: bool
-  value: @IS_ANDROID@
-  mirror: always
-
 # WebRTC prefs follow
 
 # Enables RTCPeerConnection support. Note that, when true, this pref enables
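Entries in this prefs file generate C++ accessors whose names replace the dots with underscores, so the removed pref would presumably have been read via StaticPrefs::media_getusermedia_camera_background_mute_enabled() (the exact generated name is an assumption based on the usual convention). An illustrative reimplementation of that name mapping:

#include <algorithm>
#include <iostream>
#include <string>

// The StaticPrefs generator maps a pref name to a C++ accessor by replacing
// '.' and '-' with '_' (illustrative reimplementation, not the real codegen).
std::string AccessorName(std::string pref) {
  std::replace(pref.begin(), pref.end(), '.', '_');
  std::replace(pref.begin(), pref.end(), '-', '_');
  return "StaticPrefs::" + pref + "()";
}

int main() {
  std::cout << AccessorName("media.getusermedia.camera.background.mute.enabled")
            << "\n";
  // -> StaticPrefs::media_getusermedia_camera_background_mute_enabled()
}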