Merge remote-tracking branch 'origin/develop' into jryans/convert-flow-to-ts

J. Ryan Stinnett 2021-04-23 17:19:52 +01:00
commit 2344ddd164
10 changed files with 279 additions and 29 deletions

src/@types/global.d.ts
View file

@@ -135,4 +135,30 @@ declare global {
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Error/columnNumber
columnNumber?: number;
}
// https://github.com/microsoft/TypeScript/issues/28308#issuecomment-650802278
interface AudioWorkletProcessor {
readonly port: MessagePort;
process(
inputs: Float32Array[][],
outputs: Float32Array[][],
parameters: Record<string, Float32Array>
): boolean;
}
// https://github.com/microsoft/TypeScript/issues/28308#issuecomment-650802278
const AudioWorkletProcessor: {
prototype: AudioWorkletProcessor;
new (options?: AudioWorkletNodeOptions): AudioWorkletProcessor;
};
// https://github.com/microsoft/TypeScript/issues/28308#issuecomment-650802278
function registerProcessor(
name: string,
processorCtor: (new (
options?: AudioWorkletNodeOptions
) => AudioWorkletProcessor) & {
parameterDescriptors?: AudioParamDescriptor[];
}
);
}
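These ambient declarations are needed because TypeScript's bundled DOM types do not (as of this commit) model AudioWorkletGlobalScope, so code meant to run inside a worklet would otherwise fail to compile. A minimal sketch of a file that type-checks against them (the processor name and class are hypothetical):

class SilentProcessor extends AudioWorkletProcessor {
    process(
        inputs: Float32Array[][],
        outputs: Float32Array[][],
        parameters: Record<string, Float32Array>,
    ): boolean {
        // Produce no output; returning true keeps the processor alive.
        return true;
    }
}
registerProcessor("silent-processor", SilentProcessor);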

src/HtmlUtils.tsx
View file

@@ -130,11 +130,14 @@ export function sanitizedHtmlNode(insaneHtml: string) {
return <div dangerouslySetInnerHTML={{ __html: saneHtml }} dir="auto" />;
}
export function sanitizedHtmlNodeInnerText(insaneHtml: string) {
const saneHtml = sanitizeHtml(insaneHtml, sanitizeHtmlParams);
const contentDiv = document.createElement("div");
contentDiv.innerHTML = saneHtml;
return contentDiv.innerText;
export function getHtmlText(insaneHtml: string) {
return sanitizeHtml(insaneHtml, {
allowedTags: [],
allowedAttributes: {},
selfClosing: [],
allowedSchemes: [],
disallowedTagsMode: 'discard',
});
}
/**
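With every allowlist empty and disallowedTagsMode: 'discard', sanitize-html drops the tags themselves but keeps their text content, so getHtmlText reduces a formatted body to its plain text. For example:

getHtmlText('<b>Hello</b> <a href="https://example.org">world</a>'); // => "Hello world"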

src/components/views/elements/ImageView.tsx
View file

@@ -32,13 +32,14 @@ import dis from '../../../dispatcher/dispatcher';
import {replaceableComponent} from "../../../utils/replaceableComponent";
import {RoomPermalinkCreator} from "../../../utils/permalinks/Permalinks";
import {MatrixEvent} from "matrix-js-sdk/src/models/event";
import {normalizeWheelEvent} from "../../../utils/Mouse";
const MIN_ZOOM = 100;
const MAX_ZOOM = 300;
// This is used for the buttons
const ZOOM_STEP = 10;
// This is used for mouse wheel events
const ZOOM_COEFFICIENT = 7.5;
const ZOOM_COEFFICIENT = 0.5;
// If we have moved only this much we can zoom
const ZOOM_DISTANCE = 10;
@@ -115,7 +116,9 @@ export default class ImageView extends React.Component<IProps, IState> {
private onWheel = (ev: WheelEvent) => {
ev.stopPropagation();
ev.preventDefault();
const newZoom = this.state.zoom - (ev.deltaY * ZOOM_COEFFICIENT);
const {deltaY} = normalizeWheelEvent(ev);
const newZoom = this.state.zoom - (deltaY * ZOOM_COEFFICIENT);
if (newZoom <= MIN_ZOOM) {
this.setState({

src/components/views/rooms/VoiceRecordComposerTile.tsx
View file

@@ -53,9 +53,38 @@ export default class VoiceRecordComposerTile extends React.PureComponent<IProps,
await this.state.recorder.stop();
const mxc = await this.state.recorder.upload();
MatrixClientPeg.get().sendMessage(this.props.room.roomId, {
body: "Voice message",
msgtype: "org.matrix.msc2516.voice",
url: mxc,
"body": "Voice message",
"msgtype": "org.matrix.msc2516.voice",
//"msgtype": MsgType.Audio,
"url": mxc,
"info": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
// MSC1767 experiment
"org.matrix.msc1767.text": "Voice message",
"org.matrix.msc1767.file": {
url: mxc,
name: "Voice message.ogg",
mimetype: this.state.recorder.contentType,
size: this.state.recorder.contentLength,
},
"org.matrix.msc1767.audio": {
duration: Math.round(this.state.recorder.durationSeconds * 1000),
// TODO: @@ TravisR: Waveform? (MSC1767 decision)
},
"org.matrix.experimental.msc2516.voice": { // MSC2516+MSC1767 experiment
duration: Math.round(this.state.recorder.durationSeconds * 1000),
// Events can't have floats, so we try to maintain resolution by using 1024
// as a maximum value. The waveform contains values between zero and 1, so this
// should come out largely sane.
//
// We're expecting about one data point per second of audio.
waveform: this.state.recorder.finalWaveform.map(v => Math.round(v * 1024)),
},
});
await VoiceRecordingStore.instance.disposeRecording();
this.setState({recorder: null});
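The waveform scaling in the experimental field above (round each 0-1 sample to an integer out of 1024) is cheap to reverse on the receiving side. A hypothetical decoder sketch, assuming the same experimental field name and a matrix-js-sdk MatrixEvent:

const audio = event.getContent()["org.matrix.experimental.msc2516.voice"];
const waveform: number[] = audio.waveform.map((v: number) => v / 1024); // back to 0-1, ~0.1% resolution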

src/stores/room-list/previews/MessageEventPreview.ts
View file

@@ -20,7 +20,7 @@ import { MatrixEvent } from "matrix-js-sdk/src/models/event";
import { _t } from "../../../languageHandler";
import { getSenderName, isSelf, shouldPrefixMessagesIn } from "./utils";
import ReplyThread from "../../../components/views/elements/ReplyThread";
import { sanitizedHtmlNodeInnerText } from "../../../HtmlUtils";
import { getHtmlText } from "../../../HtmlUtils";
export class MessageEventPreview implements IPreview {
public getTextFor(event: MatrixEvent, tagId?: TagID): string {
@@ -55,7 +55,7 @@ export class MessageEventPreview implements IPreview {
}
if (hasHtml) {
body = sanitizedHtmlNodeInnerText(body);
body = getHtmlText(body);
}
if (msgtype === 'm.emote') {

src/utils/Mouse.ts Normal file
View file

@@ -0,0 +1,50 @@
/*
Copyright 2021 Šimon Brandner <simon.bra.ag@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
 * Different browsers use different deltaModes, which causes inconsistent
 * scroll behaviour. To avoid that, we use this function to convert any
 * wheel event to pixel units.
 * @param {WheelEvent} event The event to normalize
 * @returns {WheelEvent} The normalized event
 */
export function normalizeWheelEvent(event: WheelEvent): WheelEvent {
const LINE_HEIGHT = 18;
let deltaX;
let deltaY;
let deltaZ;
if (event.deltaMode === 1) { // Units are lines
deltaX = (event.deltaX * LINE_HEIGHT);
deltaY = (event.deltaY * LINE_HEIGHT);
deltaZ = (event.deltaZ * LINE_HEIGHT);
} else {
deltaX = event.deltaX;
deltaY = event.deltaY;
deltaZ = event.deltaZ;
}
return new WheelEvent("syntheticWheel", {
    // Spread the original event first so the normalized values below take
    // precedence (DOM event attributes live on the prototype, so the spread
    // itself copies very little, but the ordering keeps the intent clear).
    ...event,
    deltaMode: 0,
    deltaX,
    deltaY,
    deltaZ,
});
}
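To see what the ImageView handler above receives: a line-mode wheel event (deltaMode === 1, commonly reported by Firefox) is converted at the assumed 18px line height before the zoom math runs. With hypothetical values:

const ev = new WheelEvent("wheel", {deltaMode: 1, deltaY: 3}); // user scrolled 3 lines
const {deltaY} = normalizeWheelEvent(ev);
// deltaY === 54 (3 lines * 18px), so ImageView's zoom changes by 54 * ZOOM_COEFFICIENT = 27 points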

src/utils/arrays.ts
View file

@@ -54,7 +54,7 @@ export function arraySeed<T>(val: T, length: number): T[] {
* @param a The array to clone. Must be defined.
* @returns A copy of the array.
*/
export function arrayFastClone(a: any[]): any[] {
export function arrayFastClone<T>(a: T[]): T[] {
return a.slice(0, a.length);
}
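With the generic signature, callers keep their element type instead of falling back to any[]; for instance, the finalWaveform getter below clones a number[] without a cast:

const amplitudes: number[] = arrayFastClone([0.25, 0.5, 1.0]); // T inferred as number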

src/voice/RecorderWorklet.ts
View file

@@ -0,0 +1,67 @@
/*
Copyright 2021 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import {IAmplitudePayload, ITimingPayload, PayloadEvent, WORKLET_NAME} from "./consts";
import {percentageOf} from "../utils/numbers";
// from AudioWorkletGlobalScope: https://developer.mozilla.org/en-US/docs/Web/API/AudioWorkletGlobalScope
declare const currentTime: number;
// declare const currentFrame: number;
// declare const sampleRate: number;
class MxVoiceWorklet extends AudioWorkletProcessor {
private nextAmplitudeSecond = 0;
process(inputs, outputs, parameters) {
// We only fire amplitude updates once a second to avoid flooding the recording instance
// with useless data. Much of the data would end up discarded, so we ratelimit ourselves
// here.
const currentSecond = Math.round(currentTime);
if (currentSecond === this.nextAmplitudeSecond) {
// We're expecting exactly one mono input source, so just grab the very first frame of
// samples for the analysis.
const monoChan = inputs[0][0];
// The amplitude of the frame's samples is effectively the loudness of the frame. This
// translates into a bar which can be rendered as part of the whole recording clip's
// waveform.
//
// We translate the amplitude down to 0-1 for sanity's sake.
const minVal = Math.min(...monoChan);
const maxVal = Math.max(...monoChan);
const amplitude = percentageOf(maxVal, -1, 1) - percentageOf(minVal, -1, 1);
this.port.postMessage(<IAmplitudePayload>{
ev: PayloadEvent.AmplitudeMark,
amplitude: amplitude,
forSecond: currentSecond,
});
this.nextAmplitudeSecond++;
}
// We mostly use this worklet to fire regular clock updates through to components
this.port.postMessage(<ITimingPayload>{ev: PayloadEvent.Timekeep, timeSeconds: currentTime});
// We're supposed to return false when we're "done" with the audio clip, but seeing as
// we are acting as a passive processor we are never truly "done". The browser will clean
// us up when it is done with us.
return true;
}
}
registerProcessor(WORKLET_NAME, MxVoiceWorklet);
export default null; // to appease module loaders (we never use the export)
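percentageOf(val, min, max), as used above, linearly rescales val from the range [min, max] onto [0, 1], so the subtraction amounts to the frame's peak-to-peak span expressed as a fraction of full scale. A worked example with hypothetical samples:

// Frame samples span -0.5..+0.5:
// percentageOf(+0.5, -1, 1) = 0.75
// percentageOf(-0.5, -1, 1) = 0.25
// amplitude = 0.75 - 0.25 = 0.50, i.e. a half-height bar in the waveform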

src/voice/VoiceRecording.ts
View file

@@ -23,6 +23,8 @@ import {clamp} from "../utils/numbers";
import EventEmitter from "events";
import {IDestroyable} from "../utils/IDestroyable";
import {Singleflight} from "../utils/Singleflight";
import {PayloadEvent, WORKLET_NAME} from "./consts";
import {arrayFastClone} from "../utils/arrays";
const CHANNELS = 1; // stereo isn't important
const SAMPLE_RATE = 48000; // 48kHz is what WebRTC uses. 12kHz is where we lose quality.
@@ -49,16 +51,34 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
private recorderSource: MediaStreamAudioSourceNode;
private recorderStream: MediaStream;
private recorderFFT: AnalyserNode;
private recorderProcessor: ScriptProcessorNode;
private recorderWorklet: AudioWorkletNode;
private buffer = new Uint8Array(0);
private mxc: string;
private recording = false;
private observable: SimpleObservable<IRecordingUpdate>;
private amplitudes: number[] = []; // at each second mark, generated
public constructor(private client: MatrixClient) {
super();
}
public get finalWaveform(): number[] {
return arrayFastClone(this.amplitudes);
}
public get contentType(): string {
return "audio/ogg";
}
public get contentLength(): number {
return this.buffer.length;
}
public get durationSeconds(): number {
if (!this.recorder) throw new Error("Duration not available without a recording");
return this.recorderContext.currentTime;
}
private async makeRecorder() {
this.recorderStream = await navigator.mediaDevices.getUserMedia({
audio: {
@@ -80,18 +100,34 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
// it makes the time domain less than helpful.
this.recorderFFT.fftSize = 64;
// We use an audio processor to get accurate timing information.
// The size of the audio buffer largely decides how quickly we push timing/waveform data
// out of this class. Smaller buffers mean we update more frequently as we can't hold as
// many bytes. Larger buffers mean slower updates. For scale, 1024 gives us about 30Hz of
// updates and 2048 gives us about 20Hz. We use 1024 to get as close to perceived realtime
// as possible. Must be a power of 2.
this.recorderProcessor = this.recorderContext.createScriptProcessor(1024, CHANNELS, CHANNELS);
// Set up our worklet. We use this for timing information and waveform analysis: the
// web audio API prefers this be done async to avoid holding the main thread with math.
const mxRecorderWorkletPath = document.body.dataset.vectorRecorderWorkletScript;
if (!mxRecorderWorkletPath) {
throw new Error("Unable to create recorder: no worklet script registered");
}
await this.recorderContext.audioWorklet.addModule(mxRecorderWorkletPath);
this.recorderWorklet = new AudioWorkletNode(this.recorderContext, WORKLET_NAME);
// Connect our inputs and outputs
this.recorderSource.connect(this.recorderFFT);
this.recorderSource.connect(this.recorderProcessor);
this.recorderProcessor.connect(this.recorderContext.destination);
this.recorderSource.connect(this.recorderWorklet);
this.recorderWorklet.connect(this.recorderContext.destination);
// Dev note: we can't use `addEventListener` here: assigning `onmessage` implicitly
// calls `start()` on the MessagePort, whereas `addEventListener` alone would leave
// the port's message queue disabled.
this.recorderWorklet.port.onmessage = (ev) => {
switch (ev.data['ev']) {
case PayloadEvent.Timekeep:
this.processAudioUpdate(ev.data['timeSeconds']);
break;
case PayloadEvent.AmplitudeMark:
// Sanity check to make sure we're adding about one sample per second
if (ev.data['forSecond'] === this.amplitudes.length) {
this.amplitudes.push(ev.data['amplitude']);
}
break;
}
};
this.recorder = new Recorder({
encoderPath, // magic from webpack
@@ -138,7 +174,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
return this.mxc;
}
private processAudioUpdate = (ev: AudioProcessingEvent) => {
private processAudioUpdate = (timeSeconds: number) => {
if (!this.recording) return;
// The time domain is the input to the FFT, which means we use an array of the same
@@ -162,12 +198,12 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
this.observable.update({
waveform: translatedData,
timeSeconds: ev.playbackTime,
timeSeconds: timeSeconds,
});
// Now that we've updated the data/waveform, let's do a time check. We don't want to
// go horribly over the limit. We also emit a warning state if needed.
const secondsLeft = TARGET_MAX_LENGTH - ev.playbackTime;
const secondsLeft = TARGET_MAX_LENGTH - timeSeconds;
if (secondsLeft <= 0) {
// noinspection JSIgnoredPromiseFromCall - we aren't concerned with it overlapping
this.stop();
@@ -191,7 +227,6 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
}
this.observable = new SimpleObservable<IRecordingUpdate>();
await this.makeRecorder();
this.recorderProcessor.addEventListener("audioprocess", this.processAudioUpdate);
await this.recorder.start();
this.recording = true;
this.emit(RecordingState.Started);
@@ -205,6 +240,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
// Disconnect the source early to start shutting down resources
this.recorderSource.disconnect();
this.recorderWorklet.disconnect();
await this.recorder.stop();
// close the context after the recorder so the recorder doesn't try to
@@ -216,7 +252,6 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
// Finally do our post-processing and clean up
this.recording = false;
this.recorderProcessor.removeEventListener("audioprocess", this.processAudioUpdate);
await this.recorder.close();
this.emit(RecordingState.Ended);
@@ -240,7 +275,7 @@ export class VoiceRecording extends EventEmitter implements IDestroyable {
this.emit(RecordingState.Uploading);
this.mxc = await this.client.uploadContent(new Blob([this.buffer], {
type: "audio/ogg",
type: this.contentType,
}), {
onlyContentUri: false, // to stop the warnings in the console
}).then(r => r['content_uri']);

src/voice/consts.ts Normal file
View file

@@ -0,0 +1,37 @@
/*
Copyright 2021 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
export const WORKLET_NAME = "mx-voice-worklet";
export enum PayloadEvent {
Timekeep = "timekeep",
AmplitudeMark = "amplitude_mark",
}
export interface IPayload {
ev: PayloadEvent;
}
export interface ITimingPayload extends IPayload {
ev: PayloadEvent.Timekeep;
timeSeconds: number;
}
export interface IAmplitudePayload extends IPayload {
ev: PayloadEvent.AmplitudeMark;
forSecond: number;
amplitude: number;
}
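Because each payload interface pins ev to a single PayloadEvent member, the two form a discriminated union, so a handler can let TypeScript narrow the type instead of string-indexing ev.data the way VoiceRecording does above. A sketch:

type Payload = ITimingPayload | IAmplitudePayload;

function handlePayload(payload: Payload) {
    switch (payload.ev) {
        case PayloadEvent.Timekeep:
            // Narrowed to ITimingPayload
            console.log(payload.timeSeconds);
            break;
        case PayloadEvent.AmplitudeMark:
            // Narrowed to IAmplitudePayload
            console.log(payload.forSecond, payload.amplitude);
            break;
    }
}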