A Practical Summary of HTML5 Audio Recording (Preact)

Contents:
- Getting PCM data
- Processing PCM data
- Float32 to Int16
- ArrayBuffer to Base64
- Playing PCM files
- Resampling
- PCM to MP3
- PCM to WAV
- Short-time energy calculation
- Optimizing performance with Web Workers
- Audio storage (IndexedDB)
- Enabling WebRTC in a WebView
Getting PCM Data

See the DEMO: https://github.com/deepkolos/pc-pcm-wave

Sample code:
```js
const mediaStream = await window.navigator.mediaDevices.getUserMedia({
  audio: {
    // sampleRate: 44100, // sample rate; has no effect, so resample manually (see below)
    channelCount: 1, // number of channels
    // echoCancellation: true,
    // noiseSuppression: true, // noise suppression; works well in practice
  },
})

const audioContext = new window.AudioContext()
const inputSampleRate = audioContext.sampleRate
const mediaNode = audioContext.createMediaStreamSource(mediaStream)

if (!audioContext.createScriptProcessor) {
  audioContext.createScriptProcessor = audioContext.createJavaScriptNode
}

// create a jsNode
const jsNode = audioContext.createScriptProcessor(4096, 1, 1)
jsNode.connect(audioContext.destination)
jsNode.onaudioprocess = (e) => {
  // e.inputBuffer.getChannelData(0) (left channel)
  // for two-channel recording, the right channel is e.inputBuffer.getChannelData(1)
}
mediaNode.connect(jsNode)
```

The flow, briefly:
start → getUserMedia (obtain a MediaStream) → create an AudioContext → create a scriptNode and attach it to the AudioContext → set onaudioprocess and process the data → end

To stop recording, simply disconnect the nodes attached to the audioContext, then merge the stored per-frame data to produce the PCM data:
```js
jsNode.disconnect()
mediaNode.disconnect()
jsNode.onaudioprocess = null
```
Processing PCM Data

The PCM data obtained through WebRTC is in Float32 format. For two-channel recording, the two channels also need to be interleaved:
```js
const leftDataList = [];
const rightDataList = [];

function onAudioProcess(event) {
  // one frame of PCM audio data
  let audioBuffer = event.inputBuffer;
  leftDataList.push(audioBuffer.getChannelData(0).slice(0));
  rightDataList.push(audioBuffer.getChannelData(1).slice(0));
}

// interleave the left- and right-channel data
function interleaveLeftAndRight(left, right) {
  let totalLength = left.length + right.length;
  let data = new Float32Array(totalLength);
  for (let i = 0; i < left.length; i++) {
    let k = i * 2;
    data[k] = left[i];
    data[k + 1] = right[i];
  }
  return data;
}
```
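For example (a sketch using the lists and helpers from this article), the interleaved stereo data can then go through the same Float32-to-Int16 and WAV/MP3 steps as mono data:

```js
// merge the per-frame chunks of each channel, then interleave them
const left = mergeArray(leftDataList)
const right = mergeArray(rightDataList)
const stereoData = interleaveLeftAndRight(left, right)
```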
Float32 to Int16

```js
const float32 = new Float32Array(1)
const int16 = Int16Array.from(
  float32.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
)
```

ArrayBuffer to Base64
Note: browsers also provide a btoa() function that produces Base64, but its argument must be a string. If you pass it a buffer, the buffer is first converted with toString() and only then Base64-encoded; play the decoded result with ffplay and the audio sounds quite harsh.
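Here is a short sketch of what goes wrong (the encoded string is simply the Base64 of the stringified array, not of its bytes):

```js
const int16 = new Int16Array([1234, -5678])

// btoa() coerces its argument to a string, so this encodes the text
// "1234,-5678" rather than the underlying bytes
console.log(btoa(int16)) // "MTIzNCwtNTY3OA=="
```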
The base64-arraybuffer package handles this correctly:
```js
import { encode } from 'base64-arraybuffer'

const float32 = new Float32Array(1)
const int16 = Int16Array.from(
  float32.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
)
console.log(encode(int16.buffer))
```

To verify that the Base64 is correct, convert the output back into an Int16 PCM file under Node, then play it with ffplay and check that the audio sounds normal.
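A minimal Node script along these lines does the conversion (the file name is illustrative, and the placeholder string is where the browser output goes):

```js
// decode-base64.js — run with Node
const fs = require('fs')

const base64 = '<paste the Base64 produced in the browser>'
fs.writeFileSync('test.pcm', Buffer.from(base64, 'base64'))
// then check it with the ffplay commands below
```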
Playing PCM Files

```sh
# mono, sample rate 16000, Int16
ffplay -f s16le -ar 16k -ac 1 test.pcm
# stereo, sample rate 48000, Float32
ffplay -f f32le -ar 48000 -ac 2 test.pcm
```
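To get such a test.pcm out of the browser in the first place, one option is to download the captured data as a file — a sketch, assuming the leftDataList and mergeArray helpers from this article:

```js
// download the captured mono Float32 PCM for inspection with ffplay
const pcmBlob = new Blob([mergeArray(leftDataList)], {
  type: 'application/octet-stream',
})
const a = document.createElement('a')
a.href = URL.createObjectURL(pcmBlob)
a.download = 'test.pcm'
a.click()
// play with: ffplay -f f32le -ar 44100 -ac 1 test.pcm
// (pass your AudioContext's actual sample rate to -ar)
```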
Resampling / Adjusting the Sample Rate

Although getUserMedia accepts a sample-rate option, it has no effect even in the latest Chrome, so resampling has to be done manually:
```js
const mediaStream = await window.navigator.mediaDevices.getUserMedia({
  audio: {
    // sampleRate: 44100, // sample rate; setting it has no effect
    channelCount: 1, // number of channels
    // echoCancellation: true, // echo reduction
    // noiseSuppression: true, // noise suppression; works well in practice
  },
})
```

The wave-resampler package does the job:
```js
import { resample } from 'wave-resampler'

const inputSampleRate = 44100
const outputSampleRate = 16000
const resampledBuffers = resample(
  // the per-frame buffers from onAudioProcess, merged into one array
  mergeArray(audioBuffers),
  inputSampleRate,
  outputSampleRate,
)
```

PCM to MP3
```js
import { Mp3Encoder } from 'lamejs'

let mp3buf
const mp3Data = []
const sampleBlockSize = 576 * 10 // working buffer size, a multiple of 576
const mp3Encoder = new Mp3Encoder(1, outputSampleRate, kbps) // kbps: target bitrate
const samples = float32ToInt16(
  audioBuffers,
  inputSampleRate,
  outputSampleRate,
)

let remaining = samples.length
for (let i = 0; remaining >= 0; i += sampleBlockSize) {
  const left = samples.subarray(i, i + sampleBlockSize)
  mp3buf = mp3Encoder.encodeBuffer(left)
  mp3Data.push(new Int8Array(mp3buf))
  remaining -= sampleBlockSize
}
mp3Data.push(new Int8Array(mp3Encoder.flush()))

console.log(mp3Data)

// helper
function float32ToInt16(audioBuffers, inputSampleRate, outputSampleRate) {
  const float32 = resample(
    // the per-frame buffers from onAudioProcess, merged into one array
    mergeArray(audioBuffers),
    inputSampleRate,
    outputSampleRate,
  )
  const int16 = Int16Array.from(
    float32.map(x => (x > 0 ? x * 0x7fff : x * 0x8000)),
  )
  return int16
}
```
lamejs gets this done, but the library is fairly large (160+ KB). If you have no storage requirement, the WAV format is an alternative:
```sh
> ls -alh
-rwxrwxrwx 1 root root  95K Apr 22 12:45 12s.mp3*
-rwxrwxrwx 1 root root 1.1M Apr 22 12:44 12s.wav*
-rwxrwxrwx 1 root root 235K Apr 22 12:41 30s.mp3*
-rwxrwxrwx 1 root root 2.6M Apr 22 12:40 30s.wav*
-rwxrwxrwx 1 root root  63K Apr 22 12:49 8s.mp3*
-rwxrwxrwx 1 root root 689K Apr 22 12:48 8s.wav*
```

PCM to WAV
```js
function mergeArray(list) {
  const length = list.length * list[0].length
  const data = new Float32Array(length)
  let offset = 0
  for (let i = 0; i < list.length; i++) {
    data.set(list[i], offset)
    offset += list[i].length
  }
  return data
}

function writeUTFBytes(view, offset, string) {
  const lng = string.length
  for (let i = 0; i < lng; i++) {
    view.setUint8(offset + i, string.charCodeAt(i))
  }
}

function createWavBuffer(audioData, sampleRate = 44100, channels = 1) {
  const WAV_HEAD_SIZE = 44
  const buffer = new ArrayBuffer(audioData.length * 2 + WAV_HEAD_SIZE)
  // a view is needed to manipulate the buffer
  const view = new DataView(buffer)
  // write the WAV header
  // RIFF chunk descriptor/identifier
  writeUTFBytes(view, 0, 'RIFF')
  // RIFF chunk length
  view.setUint32(4, 44 + audioData.length * 2, true)
  // RIFF type
  writeUTFBytes(view, 8, 'WAVE')
  // format chunk identifier (FMT sub-chunk); note the trailing space
  writeUTFBytes(view, 12, 'fmt ')
  // format chunk length
  view.setUint32(16, 16, true)
  // sample format (raw)
  view.setUint16(20, 1, true)
  // channel count
  view.setUint16(22, channels, true)
  // sample rate
  view.setUint32(24, sampleRate, true)
  // byte rate (sample rate * block align)
  view.setUint32(28, sampleRate * channels * 2, true)
  // block align (channel count * bytes per sample)
  view.setUint16(32, channels * 2, true)
  // bits per sample
  view.setUint16(34, 16, true)
  // data sub-chunk
  // data chunk identifier
  writeUTFBytes(view, 36, 'data')
  // data chunk length
  view.setUint32(40, audioData.length * 2, true)
  // write the PCM samples
  let index = 44
  const volume = 1
  const { length } = audioData
  for (let i = 0; i < length; i++) {
    view.setInt16(index, audioData[i] * (0x7fff * volume), true)
    index += 2
  }
  return buffer
}

// pass in the merged per-frame buffers from onAudioProcess
createWavBuffer(mergeArray(audioBuffers))
```

A WAV file is basically the PCM data preceded by some audio metadata.
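As a usage sketch (assuming a mono recording at the default 44100 Hz), the resulting buffer can be wrapped in a Blob and played straight away:

```js
const wavBlob = new Blob([createWavBuffer(mergeArray(audioBuffers))], {
  type: 'audio/wav',
})
const audio = new Audio(URL.createObjectURL(wavBlob))
audio.play()
```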
Simple Short-Time Energy Calculation
```js
function shortTimeEnergy(audioData) {
  let sum = 0
  const energy = []
  const { length } = audioData
  for (let i = 0; i < length; i++) {
    sum += audioData[i] ** 2
    if ((i + 1) % 256 === 0) {
      energy.push(sum)
      sum = 0
    } else if (i === length - 1) {
      energy.push(sum)
    }
  }
  return energy
}
```
Because the result varies considerably with each device's recording gain, and the computed values are large, a simple ratio is used to tell voice apart from noise. See the DEMO:
```js
const NoiseVoiceWatershedWave = 2.3
const energy = shortTimeEnergy(e.inputBuffer.getChannelData(0).slice(0))
const avg = energy.reduce((a, b) => a + b) / energy.length

const nextState =
  Math.max(...energy) / avg > NoiseVoiceWatershedWave ? 'voice' : 'noise'
```

Optimizing Performance with Web Workers
Audio involves a large amount of data, so a Web Worker can be used to keep processing from blocking the UI thread.

In a Webpack project, setting up a Web Worker is straightforward: just install worker-loader.

preact.config.js
```js
export default (config, env, helpers) => {
  config.module.rules.push({
    test: /\.worker\.js$/,
    use: { loader: 'worker-loader', options: { inline: true } },
  })
}
```
recorder.worker.js
```js
self.addEventListener('message', event => {
  console.log(event.data)
  // convert to MP3 / Base64 / WAV, etc.
  const output = ''
  self.postMessage(output)
})
```
Using the worker:
```js
async function toMP3(audioBuffers, inputSampleRate, outputSampleRate = 16000) {
  const { default: Worker } = await import('./recorder.worker')
  // simple usage; a project can create the worker instance when the recorder
  // is instantiated, and use several instances if concurrency is needed
  const worker = new Worker()
  return new Promise(resolve => {
    worker.postMessage({
      audioBuffers: audioBuffers,
      inputSampleRate: inputSampleRate,
      outputSampleRate: outputSampleRate,
      type: 'mp3',
    })
    worker.onmessage = event => resolve(event.data)
  })
}
```
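Calling it then looks like this (a sketch: audioBuffers is the list of per-frame Float32Arrays collected in onaudioprocess, and the worker is assumed to reply with the encoded MP3 chunks):

```js
const mp3Data = await toMP3(audioBuffers, inputSampleRate)
const mp3Blob = new Blob(mp3Data, { type: 'audio/mp3' })
```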
Storing the Audio

Browsers offer two places for persistent storage: LocalStorage and IndexedDB. LocalStorage is the more familiar of the two but can only store strings, whereas IndexedDB can store a Blob directly, so IndexedDB is the better choice; with LocalStorage the audio would first have to be converted to Base64, making it larger still.

To avoid taking up too much of the user's space, the MP3 format is used for storage (see the size comparison above).
A simple IndexedDB wrapper follows; readers familiar with back-end work may prefer to pick an ORM library to make reads and writes more convenient:
```js
const indexedDB =
  window.indexedDB ||
  window.webkitIndexedDB ||
  window.mozIndexedDB ||
  window.OIndexedDB ||
  window.msIndexedDB
const IDBTransaction =
  window.IDBTransaction ||
  window.webkitIDBTransaction ||
  window.OIDBTransaction ||
  window.msIDBTransaction
const readWriteMode =
  typeof IDBTransaction.READ_WRITE === 'undefined'
    ? 'readwrite'
    : IDBTransaction.READ_WRITE

const dbVersion = 1
const storeDefault = 'mp3'

let dbLink

function initDB(store) {
  return new Promise((resolve, reject) => {
    if (dbLink) resolve(dbLink)

    // Create/open database
    const request = indexedDB.open('audio', dbVersion)

    request.onsuccess = event => {
      const db = request.result
      db.onerror = event => {
        reject(event)
      }
      if (db.version === dbVersion) resolve(db)
    }

    request.onerror = event => {
      reject(event)
    }

    // For future use. Currently only in latest Firefox versions
    request.onupgradeneeded = event => {
      dbLink = event.target.result
      const { transaction } = event.target
      if (!dbLink.objectStoreNames.contains(store)) {
        dbLink.createObjectStore(store)
      }
      transaction.oncomplete = event => {
        // Now store is available to be populated
        resolve(dbLink)
      }
    }
  })
}

export const writeIDB = async (name, blob, store = storeDefault) => {
  const db = await initDB(store)
  const transaction = db.transaction([store], readWriteMode)
  const objStore = transaction.objectStore(store)
  return new Promise((resolve, reject) => {
    const request = objStore.put(blob, name)
    request.onsuccess = event => resolve(event)
    request.onerror = event => reject(event)
    transaction.commit && transaction.commit()
  })
}

export const readIDB = async (name, store = storeDefault) => {
  const db = await initDB(store)
  const transaction = db.transaction([store], readWriteMode)
  const objStore = transaction.objectStore(store)
  return new Promise((resolve, reject) => {
    const request = objStore.get(name)
    request.onsuccess = event => resolve(event.target.result)
    request.onerror = event => reject(event)
    transaction.commit && transaction.commit()
  })
}

export const clearIDB = async (store = storeDefault) => {
  const db = await initDB(store)
  const transaction = db.transaction([store], readWriteMode)
  const objStore = transaction.objectStore(store)
  return new Promise((resolve, reject) => {
    const request = objStore.clear()
    request.onsuccess = event => resolve(event)
    request.onerror = event => reject(event)
    transaction.commit && transaction.commit()
  })
}
```
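A usage sketch tying this to the MP3 step above (the key name is illustrative):

```js
const mp3Blob = new Blob(mp3Data, { type: 'audio/mp3' })
await writeIDB('recording-1', mp3Blob)

const stored = await readIDB('recording-1')
console.log(stored) // the same Blob, read back from IndexedDB
```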
Enabling WebRTC in a WebView

See "WebView WebRTC not working":
```java
webView.setWebChromeClient(new WebChromeClient() {
    @TargetApi(Build.VERSION_CODES.LOLLIPOP)
    @Override
    public void onPermissionRequest(final PermissionRequest request) {
        request.grant(request.getResources());
    }
});
```
That concludes this summary of HTML5 recording practice with Preact; I hope it proves helpful.