Move hardcoded audio configuration to AudioConfig

This will allow these constants to be used from other classes not
directly related to AudioCapture.

PR #5102 <https://github.com/Genymobile/scrcpy/pull/5102>
Author: Romain Vimont
Date: 2024-07-14 23:10:07 +02:00
Parent: 5e605b9b8f
Commit: a2f3a5cf18
4 changed files with 31 additions and 13 deletions
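As a usage sketch only (this class is hypothetical and not part of the commit): once the constants live in the new AudioConfig class added below, any class can derive audio parameters without depending on AudioCapture, for example:

// Hypothetical consumer, any package; import only needed if outside com.genymobile.scrcpy.audio.
import com.genymobile.scrcpy.audio.AudioConfig;

public final class AudioStats {

    private AudioStats() {
        // not instantiable
    }

    // Bytes produced per second of captured audio: 48000 * 2 * 2 = 192000.
    public static int bytesPerSecond() {
        return AudioConfig.SAMPLE_RATE * AudioConfig.CHANNELS * AudioConfig.BYTES_PER_SAMPLE;
    }

    // Duration covered by one maximum read: 1024 samples at 48 kHz is about 21333 microseconds.
    public static long maxReadDurationUs() {
        int samplesPerRead = AudioConfig.MAX_READ_SIZE / (AudioConfig.CHANNELS * AudioConfig.BYTES_PER_SAMPLE);
        return samplesPerRead * 1_000_000L / AudioConfig.SAMPLE_RATE;
    }
}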

File: AudioCapture.java

@@ -20,17 +20,14 @@ import java.nio.ByteBuffer;
 
 public final class AudioCapture {
 
-    public static final int SAMPLE_RATE = 48000;
-    public static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_STEREO;
-    public static final int CHANNELS = 2;
-    public static final int CHANNEL_MASK = AudioFormat.CHANNEL_IN_LEFT | AudioFormat.CHANNEL_IN_RIGHT;
-    public static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT;
-    public static final int BYTES_PER_SAMPLE = 2;
+    private static final int SAMPLE_RATE = AudioConfig.SAMPLE_RATE;
+    private static final int CHANNEL_CONFIG = AudioConfig.CHANNEL_CONFIG;
+    private static final int CHANNELS = AudioConfig.CHANNELS;
+    private static final int CHANNEL_MASK = AudioConfig.CHANNEL_MASK;
+    private static final int ENCODING = AudioConfig.ENCODING;
+    private static final int BYTES_PER_SAMPLE = AudioConfig.BYTES_PER_SAMPLE;
 
-    // Never read more than 1024 samples, even if the buffer is bigger (that would increase latency).
-    // A lower value is useless, since the system captures audio samples by blocks of 1024 (so for example if we read by blocks of 256 samples, we
-    // receive 4 successive blocks without waiting, then we wait for the 4 next ones).
-    public static final int MAX_READ_SIZE = 1024 * CHANNELS * BYTES_PER_SAMPLE;
+    private static final int MAX_READ_SIZE = AudioConfig.MAX_READ_SIZE;
 
     private static final long ONE_SAMPLE_US = (1000000 + SAMPLE_RATE - 1) / SAMPLE_RATE; // 1 sample in microseconds (used for fixing PTS)
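For reference, the unchanged ONE_SAMPLE_US line is a ceiling division: with SAMPLE_RATE = 48000, (1000000 + 48000 - 1) / 48000 = 1047999 / 48000 = 21, so one sample is accounted as 21 microseconds even though its exact duration is about 20.83 microseconds.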

File: AudioConfig.java (new file)

@@ -0,0 +1,21 @@
+package com.genymobile.scrcpy.audio;
+
+import android.media.AudioFormat;
+
+public final class AudioConfig {
+    public static final int SAMPLE_RATE = 48000;
+    public static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_STEREO;
+    public static final int CHANNELS = 2;
+    public static final int CHANNEL_MASK = AudioFormat.CHANNEL_IN_LEFT | AudioFormat.CHANNEL_IN_RIGHT;
+    public static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT;
+    public static final int BYTES_PER_SAMPLE = 2;
+
+    // Never read more than 1024 samples, even if the buffer is bigger (that would increase latency).
+    // A lower value is useless, since the system captures audio samples by blocks of 1024 (so for example if we read by blocks of 256 samples, we
+    // receive 4 successive blocks without waiting, then we wait for the 4 next ones).
+    public static final int MAX_READ_SIZE = 1024 * CHANNELS * BYTES_PER_SAMPLE;
+
+    private AudioConfig() {
+        // Not instantiable
+    }
+}
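A sketch of how these constants map onto an android.media.AudioFormat. The helper class below is hypothetical and not part of this commit; scrcpy's actual capture setup lives in AudioCapture and may differ.

import android.media.AudioFormat;

// Hypothetical helper, shown only to illustrate what the constants describe.
final class AudioFormatFactory {

    private AudioFormatFactory() {
        // not instantiable
    }

    static AudioFormat buildCaptureFormat() {
        return new AudioFormat.Builder()
                .setEncoding(AudioConfig.ENCODING)        // ENCODING_PCM_16BIT
                .setSampleRate(AudioConfig.SAMPLE_RATE)   // 48000 Hz
                .setChannelMask(AudioConfig.CHANNEL_MASK) // CHANNEL_IN_LEFT | CHANNEL_IN_RIGHT
                .build();
    }
}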

File: AudioEncoder.java

@@ -44,8 +44,8 @@ public final class AudioEncoder implements AsyncProcessor {
         }
     }
 
-    private static final int SAMPLE_RATE = AudioCapture.SAMPLE_RATE;
-    private static final int CHANNELS = AudioCapture.CHANNELS;
+    private static final int SAMPLE_RATE = AudioConfig.SAMPLE_RATE;
+    private static final int CHANNELS = AudioConfig.CHANNELS;
 
     private final AudioCapture capture;
     private final Streamer streamer;
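For context, SAMPLE_RATE and CHANNELS are the values an audio MediaFormat is typically created from. The helper below is a hypothetical illustration, not scrcpy's actual encoder setup, which presumably also applies codec options and picks the MIME type from the selected codec.

import android.media.MediaFormat;

// Hypothetical helper; mimeType and bitRate are illustrative parameters.
final class EncoderFormatFactory {

    private EncoderFormatFactory() {
        // not instantiable
    }

    static MediaFormat createFormat(String mimeType, int bitRate) {
        MediaFormat format = MediaFormat.createAudioFormat(mimeType, AudioConfig.SAMPLE_RATE, AudioConfig.CHANNELS);
        format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate); // e.g. 128000
        return format;
    }
}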

File: AudioRawRecorder.java

@@ -30,7 +30,7 @@ public final class AudioRawRecorder implements AsyncProcessor {
             return;
         }
 
-        final ByteBuffer buffer = ByteBuffer.allocateDirect(AudioCapture.MAX_READ_SIZE);
+        final ByteBuffer buffer = ByteBuffer.allocateDirect(AudioConfig.MAX_READ_SIZE);
         final MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
 
         try {
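To illustrate why each read is capped at AudioConfig.MAX_READ_SIZE, here is a hypothetical raw PCM pump written directly against android.media.AudioRecord. scrcpy's real loop goes through AudioCapture and a Streamer, so the class and method names below are illustrative only.

package com.genymobile.scrcpy.audio; // hypothetical placement

import android.media.AudioRecord;

import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;

// Hypothetical class, not part of scrcpy.
public final class RawPcmPump {

    private RawPcmPump() {
        // not instantiable
    }

    public static void pump(AudioRecord recorder, OutputStream out) throws IOException {
        ByteBuffer buffer = ByteBuffer.allocateDirect(AudioConfig.MAX_READ_SIZE);
        byte[] chunk = new byte[AudioConfig.MAX_READ_SIZE];
        while (true) {
            buffer.clear();
            // Capped read: the system delivers samples in blocks of 1024, so asking for
            // more per call only adds latency (see the comment in AudioConfig).
            int r = recorder.read(buffer, AudioConfig.MAX_READ_SIZE);
            if (r <= 0) {
                break; // error code or end of capture
            }
            buffer.rewind();
            buffer.get(chunk, 0, r);
            out.write(chunk, 0, r);
        }
    }
}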