commit db73d04f0d (parent 684758857e): checkpoint
@@ -0,0 +1,56 @@
import { createReadStream } from "fs";

export type SwapPoint = {
  position_start: number;
  duration: number;
  loud: boolean;
  label: string;
};

// Scans a raw unsigned 8-bit PCM stream (silence = 128) and records every point where the
// signal switches between "loud" and "quiet". Switching to loud happens on the first sample
// above the threshold; switching back to quiet only happens after inertia_samples samples
// have passed without another loud sample, so short pauses do not produce a swap.
export default async function (
  file: import("fs").PathLike,
  threshold_at_point: number,
  inertia_samples: number,
  label: string
): Promise<SwapPoint[]> {
  const stream = createReadStream(file);
  let position = 0;
  const results: SwapPoint[] = [];
  let last_swap_position = 0;
  let keep_loud_until = 0;
  let was_loud_last_time = false;
  return new Promise((resolve, reject) => {
    stream.on("readable", () => {
      let chunk: Buffer | null;
      while ((chunk = stream.read()) !== null) {
        for (let i = 0; i < chunk.byteLength; i++) {
          position++;
          const byte = chunk[i];
          const volume = Math.abs(byte - 128); // distance from the pcm_u8 midpoint (silence)
          if (position >= keep_loud_until) {
            // state changes are frozen while we are inside the hold window
            const is_loud: boolean = volume > threshold_at_point;
            if (is_loud !== was_loud_last_time) {
              const swap_point = {
                position_start: last_swap_position,
                duration: position - last_swap_position,
                loud: was_loud_last_time,
                label,
              };
              results.push(swap_point);
              last_swap_position = position;
              was_loud_last_time = is_loud;
            }
          }
          if (volume > threshold_at_point) {
            // every loud sample extends the hold window, postponing a switch back to quiet
            keep_loud_until = position + inertia_samples;
          }
        }
      }
    });
    stream.on("end", () => {
      resolve(results);
    });

    stream.on("error", reject);
  });
}
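A minimal usage sketch of the analyzer above (not part of the commit): the module path "./find-swap-points", the threshold of 10 and the inertia of 4000 samples are assumptions for illustration. With the 8 kHz pcm_u8 dumps produced by the shell script below, one sample is 1/8000 s, so 4000 samples is half a second of hold time.

// hypothetical usage; the module filename and the parameter values are assumptions
import findSwapPoints from "./find-swap-points";

async function main() {
  // /tmp/leftraw is the raw 8 kHz unsigned 8-bit PCM dump produced by the shell script below
  const left = await findSwapPoints("/tmp/leftraw", 10, 4000, "left");
  console.log(left.slice(0, 5)); // first few { position_start, duration, loud, label } segments
}

main();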
@@ -0,0 +1,35 @@
#!/bin/bash

#
# You need ts-node installed globally (npm install -g ts-node)
#
# The directory with this script must contain a "pics" directory with the files "left.png", "right.png", "none.png" and "both.png"
#

input=/home/kuba/Downloads/podcast-01-after-effects.mp3 # path to the mp3 file with Arek in one channel and Kuba in the other

aresample=8000 # leave this unchanged

echo "splitting the mp3 into two separate wav files"
ffmpeg -i "$input" -map_channel 0.0.0 /tmp/left.wav -map_channel 0.0.1 /tmp/right.wav

echo "generating the raw files in two parallel jobs"
# each channel is downmixed to mono, resampled to 8 kHz and dumped as raw unsigned 8-bit PCM (one byte per sample, silence at 128)
ffmpeg -i /tmp/left.wav -ac 1 -filter:a aresample=$aresample -map 0:a -c:a pcm_u8 -f data - > /tmp/leftraw &
ffmpeg -i /tmp/right.wav -ac 1 -filter:a aresample=$aresample -map 0:a -c:a pcm_u8 -f data - > /tmp/rightraw &

# wait until both background jobs have finished
wait

echo "generating the demuxers..."

# generates the list of frames for ffmpeg to assemble:
ts-node generate-demuxer.ts > /tmp/demuxer.txt

mkdir -p out

# uses demuxer.txt to assemble the final video:
echo "generating the video"
ffmpeg -y -f concat -i /tmp/demuxer.txt -r 30 -tune stillimage -vsync vfr -pix_fmt yuv420p out/video.mp4

echo "muxing the video with the audio"
ffmpeg -i out/video.mp4 -i "$input" -ac 1 -tune stillimage out/video-and-audio.mp4
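For reference, /tmp/demuxer.txt is read by ffmpeg's concat demuxer, so generate-demuxer.ts (not shown in this diff) is expected to print a list of "file"/"duration" directives. The image paths below are the ones required by the script's header comment; the durations are purely illustrative, the real ones come from the swap points detected in the raw audio.

file 'pics/none.png'
duration 0.50
file 'pics/left.png'
duration 1.25
file 'pics/both.png'
duration 0.75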
Binary file added (329 KiB)
Binary file added (311 KiB)
Binary file added (287 KiB)
Binary file added (306 KiB)
@@ -0,0 +1,26 @@
export default class Window {
  length: number;
  buffer: number[] = [];
  constructor(length: number) {
    this.length = length;
  }

  push(element: number) {
    if (this.buffer.length === this.length) {
      this.buffer.shift();
    }
    this.buffer.push(element);
  }

  sum() {
    return this.buffer.reduce((a, b) => a + b, 0);
  }

  clear() {
    this.buffer = [];
  }

  isFull() {
    return this.buffer.length === this.length;
  }
}
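A short usage sketch for the Window class above (the module path and the values are illustrative): it keeps a fixed-size sliding window over the most recent "length" numbers, dropping the oldest element once the window is full.

// illustrative only; "./window" is an assumed module path
import Window from "./window";

const w = new Window(3);
[1, 2, 3, 4].forEach((n) => w.push(n)); // the oldest element (1) is dropped, the buffer is now [2, 3, 4]
console.log(w.isFull()); // true
console.log(w.sum()); // 9
w.clear();
console.log(w.isFull()); // false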