/**
 * Obtains the set of target formats with the given encoding that this codec
 * can convert audio data in the given source format to.
 */
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
// filter out targetEncoding from the old getOutputFormats( sourceFormat ) method
AudioFormat[] formats = getOutputFormats( sourceFormat );
Vector newFormats = new Vector();
for(int i=0; i<formats.length; i++ ) {
if( formats[i].getEncoding().equals( targetEncoding ) ) {
newFormats.addElement( formats[i] );
}
}
AudioFormat[] formatArray = new AudioFormat[newFormats.size()];
for (int i = 0; i < formatArray.length; i++) {
formatArray[i] = (AudioFormat)(newFormats.elementAt(i));
}
return formatArray;
}
Example source code for the Java class javax.sound.sampled.AudioFormat
The listing above: PCMtoPCMCodec.java source code (project: jdk8u-jdk)
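For comparison, a minimal sketch (new code, not from the listing; the 44.1 kHz mono source format is an assumption) that asks the same question through the public AudioSystem API, which delegates to registered codecs such as PCMtoPCMCodec:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class ListTargetFormats {
    public static void main(String[] args) {
        // 44.1 kHz, 16-bit, mono, signed, little-endian PCM (assumed source)
        AudioFormat source = new AudioFormat(44100f, 16, 1, true, false);
        // AudioSystem consults the installed FormatConversionProviders,
        // including PCMtoPCMCodec, to answer this query.
        for (AudioFormat target : AudioSystem.getTargetFormats(
                AudioFormat.Encoding.PCM_UNSIGNED, source)) {
            System.out.println(target);
        }
    }
}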
AiffFileWriter.java source code (project: jdk8u-jdk)
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
System.arraycopy(types, 0, filetypes, 0, types.length);
// make sure we can write this stream
AudioFormat format = stream.getFormat();
AudioFormat.Encoding encoding = format.getEncoding();
if( (AudioFormat.Encoding.ALAW.equals(encoding)) ||
(AudioFormat.Encoding.ULAW.equals(encoding)) ||
(AudioFormat.Encoding.PCM_SIGNED.equals(encoding)) ||
(AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding)) ) {
return filetypes;
}
return new AudioFileFormat.Type[0];
}
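A short usage sketch (not part of jdk8u-jdk; the file names are placeholders) showing how a caller reaches this check through AudioSystem before writing an AIFF file:

import java.io.File;
import javax.sound.sampled.AudioFileFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class CheckAiffSupport {
    public static void main(String[] args) throws Exception {
        AudioInputStream in = AudioSystem.getAudioInputStream(new File("input.wav"));
        // AudioSystem delegates to the installed AudioFileWriters,
        // including AiffFileWriter, to decide whether AIFF output is possible.
        if (AudioSystem.isFileTypeSupported(AudioFileFormat.Type.AIFF, in)) {
            AudioSystem.write(in, AudioFileFormat.Type.AIFF, new File("output.aiff"));
        }
    }
}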
FloatSampleBuffer.java source code (project: romanov)
/**
* Resets this buffer with the audio data specified in the arguments. This
* FloatSampleBuffer's sample count will be set to
* <code>byteCount / format.getFrameSize()</code>.
*
 * @param lazy if true, existing channel arrays are re-used where possible
 *            to minimize garbage collection
 * @throws IllegalArgumentException if the buffer is smaller than
 *             <code>offset + byteCount</code>
*/
public void initFromByteArray(byte[] buffer, int offset, int byteCount,
AudioFormat format, boolean lazy) {
if (offset + byteCount > buffer.length) {
throw new IllegalArgumentException(
"FloatSampleBuffer.initFromByteArray: buffer too small.");
}
int thisSampleCount = byteCount / format.getFrameSize();
init(format.getChannels(), thisSampleCount, format.getSampleRate(),
lazy);
// save format for automatic dithering mode
originalFormatType = FloatSampleTools.getFormatType(format);
FloatSampleTools.byte2float(buffer, offset, channels, 0, sampleCount,
format);
}
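A hedged usage sketch for initFromByteArray, assuming the FloatSampleBuffer class from these listings (including its no-argument constructor) and an arbitrary block of 16-bit stereo PCM bytes:

// FloatSampleBuffer is the project class shown above; its package is project-specific.
AudioFormat fmt = new AudioFormat(44100f, 16, 2, true, false);
byte[] pcm = new byte[4096];                      // interleaved 16-bit stereo frames
FloatSampleBuffer fsb = new FloatSampleBuffer();  // assumed no-arg constructor
// Fill the float buffer from the raw bytes; lazy=true re-uses existing channel arrays.
fsb.initFromByteArray(pcm, 0, pcm.length, fmt, true);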
FloatSampleBuffer.java source code (project: romanov)
/**
* Write the contents of the byte array to this buffer, overwriting existing
* data. If the byte array has fewer channels than this float buffer, only
* the first channels are written. Vice versa, if the byte buffer has more
* channels than this float buffer, only the first channels of the byte
* buffer are written to this buffer.
* <p>
* The format and the number of samples of this float buffer are not
* changed, so if the byte array has more samples than fit into this float
* buffer, it is not expanded.
*
* @param buffer the byte buffer to write to this float buffer
 * @param srcByteOffset the offset in bytes in buffer from which to start
 *            reading
 * @param format the audio format of the bytes in buffer
 * @param dstSampleOffset the offset in samples at which to start writing the
 *            converted float data into this float buffer
 * @param aSampleCount the number of samples to write
* @return the number of samples actually written
*/
public int writeByteBuffer(byte[] buffer, int srcByteOffset,
AudioFormat format, int dstSampleOffset, int aSampleCount) {
if (dstSampleOffset + aSampleCount > getSampleCount()) {
aSampleCount = getSampleCount() - dstSampleOffset;
}
int lChannels = format.getChannels();
if (lChannels > getChannelCount()) {
lChannels = getChannelCount();
}
if (lChannels > format.getChannels()) {
lChannels = format.getChannels();
}
for (int channel = 0; channel < lChannels; channel++) {
float[] data = getChannel(channel);
FloatSampleTools.byte2floatGeneric(buffer, srcByteOffset,
format.getFrameSize(), data, dstSampleOffset, aSampleCount,
format);
srcByteOffset += format.getFrameSize() / format.getChannels();
}
return aSampleCount;
}
AudioFloatFormatConverter.java source code (project: openjdk-jdk10)
@Override
public AudioInputStream getAudioInputStream(Encoding targetEncoding,
AudioInputStream sourceStream) {
if (!isConversionSupported(targetEncoding, sourceStream.getFormat())) {
throw new IllegalArgumentException(
"Unsupported conversion: " + sourceStream.getFormat()
.toString() + " to " + targetEncoding.toString());
}
if (sourceStream.getFormat().getEncoding().equals(targetEncoding))
return sourceStream;
AudioFormat format = sourceStream.getFormat();
int channels = format.getChannels();
Encoding encoding = targetEncoding;
float samplerate = format.getSampleRate();
int bits = format.getSampleSizeInBits();
boolean bigendian = format.isBigEndian();
if (targetEncoding.equals(Encoding.PCM_FLOAT))
bits = 32;
AudioFormat targetFormat = new AudioFormat(encoding, samplerate, bits,
channels, channels * bits / 8, samplerate, bigendian);
return getAudioInputStream(targetFormat, sourceStream);
}
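A usage sketch (the input file name is a placeholder) that requests a PCM_FLOAT view of an existing stream via AudioSystem, the public entry point that ends up in converters like this one:

import java.io.File;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;

public class ToFloatPcm {
    public static void main(String[] args) throws Exception {
        AudioInputStream src = AudioSystem.getAudioInputStream(new File("input.wav"));
        if (AudioSystem.isConversionSupported(AudioFormat.Encoding.PCM_FLOAT,
                src.getFormat())) {
            AudioInputStream floats = AudioSystem.getAudioInputStream(
                    AudioFormat.Encoding.PCM_FLOAT, src);
            System.out.println(floats.getFormat());
        }
    }
}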
WaveFileWriter.java source code (project: jdk8u-jdk)
public AudioFileFormat.Type[] getAudioFileTypes(AudioInputStream stream) {
AudioFileFormat.Type[] filetypes = new AudioFileFormat.Type[types.length];
System.arraycopy(types, 0, filetypes, 0, types.length);
// make sure we can write this stream
AudioFormat format = stream.getFormat();
AudioFormat.Encoding encoding = format.getEncoding();
if( AudioFormat.Encoding.ALAW.equals(encoding) ||
AudioFormat.Encoding.ULAW.equals(encoding) ||
AudioFormat.Encoding.PCM_SIGNED.equals(encoding) ||
AudioFormat.Encoding.PCM_UNSIGNED.equals(encoding) ) {
return filetypes;
}
return new AudioFileFormat.Type[0];
}
PCMtoPCMCodec.java source code (project: OpenJSharp)
/**
 * Obtains an audio input stream with the given encoding, converted from the
 * given source stream.
 */
public AudioInputStream getAudioInputStream(AudioFormat.Encoding targetEncoding, AudioInputStream sourceStream) {
if( isConversionSupported(targetEncoding, sourceStream.getFormat()) ) {
AudioFormat sourceFormat = sourceStream.getFormat();
AudioFormat targetFormat = new AudioFormat( targetEncoding,
sourceFormat.getSampleRate(),
sourceFormat.getSampleSizeInBits(),
sourceFormat.getChannels(),
sourceFormat.getFrameSize(),
sourceFormat.getFrameRate(),
sourceFormat.isBigEndian() );
return getAudioInputStream( targetFormat, sourceStream );
} else {
throw new IllegalArgumentException("Unsupported conversion: " + sourceStream.getFormat().toString() + " to " + targetEncoding.toString() );
}
}
JavaSoundAudioDevice.java source code (project: jlayer)
/**
* Runs a short test by playing a short silent sound.
*/
public void test()
throws JavaLayerException
{
try
{
open(new AudioFormat(22050, 16, 1, true, false));
short[] data = new short[22050/10];
write(data, 0, data.length);
flush();
close();
}
catch (RuntimeException ex)
{
throw new JavaLayerException("Device test failed: "+ex);
}
}
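The same kind of smoke test can be written directly against javax.sound.sampled; a sketch (not part of jlayer) that plays roughly a tenth of a second of silence on the default source line:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.SourceDataLine;

public class SilenceTest {
    public static void main(String[] args) throws Exception {
        AudioFormat format = new AudioFormat(22050f, 16, 1, true, false);
        SourceDataLine line = AudioSystem.getSourceDataLine(format);
        line.open(format);
        line.start();
        byte[] silence = new byte[22050 / 10 * 2]; // ~0.1 s of 16-bit mono frames
        line.write(silence, 0, silence.length);
        line.drain();
        line.close();
    }
}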
JSBufferedSampleRecorder.java source code (project: romanov)
/**
* Constructs a JSBufferedSampleRecorder that expects audio in the given AudioFormat and
* which will save to a file with given name.
*
* @param format the AudioFormat you want to record in
* @param name the name of the file to save to (not including the extension)
*/
JSBufferedSampleRecorder(JSMinim sys,
String fileName,
AudioFileFormat.Type fileType,
AudioFormat fileFormat,
int bufferSize)
{
name = fileName;
type = fileType;
format = fileFormat;
buffers = new ArrayList<FloatBuffer>(20);
left = FloatBuffer.allocate(bufferSize*10);
if ( format.getChannels() == Minim.STEREO )
{
right = FloatBuffer.allocate(bufferSize*10);
}
else
{
right = null;
}
system = sys;
}
AiffSampleRate.java source code (project: openjdk-jdk10)
private static boolean testSampleRate(float sampleRate) {
boolean result = true;
try {
// create AudioInputStream with the given sample rate
ByteArrayInputStream data = new ByteArrayInputStream(new byte[1]);
AudioFormat format = new AudioFormat(sampleRate, 8, 1, true, true);
AudioInputStream stream = new AudioInputStream(data, format, 1);
// write to AIFF file
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
AudioSystem.write(stream, AudioFileFormat.Type.AIFF, outputStream);
byte[] fileData = outputStream.toByteArray();
InputStream inputStream = new ByteArrayInputStream(fileData);
AudioFileFormat aff = AudioSystem.getAudioFileFormat(inputStream);
if (! equals(sampleRate, aff.getFormat().getFrameRate())) {
out("error for sample rate " + sampleRate);
result = false;
}
} catch (Exception e) {
out(e);
out("Test NOT FAILED");
}
return result;
}
JitterExample.java source code (project: rcom)
public static void main(String[] args) throws Exception {
AbstractRcomArgs a=new AbstractRcomArgs();
UtilCli.parse(a, args, true);
File folder=new File("/home/rizsi/tmp/video");
byte[] data=UtilFile.loadFile(new File(folder, "remote.sw"));
AudioFormat format=ManualTestEchoCancel.getFormat();
final Mixer mixer = AudioSystem.getMixer(null);
DataLine.Info info2= new DataLine.Info(SourceDataLine.class, format);
SourceDataLine s=(SourceDataLine) mixer.getLine(info2);
s.open(format, framesamples*2);
s.start();
try(LoopInputStream lis=new LoopInputStream(data))
{
try(JitterResampler rs=new JitterResampler(a, 8000, framesamples, 2))
{
new FeedThread(lis, rs).start();
final byte[] buffer = new byte[framesamples * 2];
while(true)
{
rs.readOutput(buffer);
s.write(buffer, 0, buffer.length);
}
}
}
}
PCMtoPCMCodec.java source code (project: openjdk-jdk10)
@Override
public AudioInputStream getAudioInputStream(AudioFormat.Encoding targetEncoding, AudioInputStream sourceStream) {
if( isConversionSupported(targetEncoding, sourceStream.getFormat()) ) {
AudioFormat sourceFormat = sourceStream.getFormat();
AudioFormat targetFormat = new AudioFormat( targetEncoding,
sourceFormat.getSampleRate(),
sourceFormat.getSampleSizeInBits(),
sourceFormat.getChannels(),
sourceFormat.getFrameSize(),
sourceFormat.getFrameRate(),
sourceFormat.isBigEndian() );
return getConvertedStream(targetFormat, sourceStream);
} else {
throw new IllegalArgumentException("Unsupported conversion: " + sourceStream.getFormat().toString() + " to " + targetEncoding.toString() );
}
}
Replay.java source code (project: rcom)
public static void main(String[] args) throws IOException, LineUnavailableException {
File folder=new File("/home/rizsi/tmp/video");
byte[] data=UtilFile.loadFile(new File(folder, "remote.sw"));
byte[] data2=UtilFile.loadFile(new File(folder, "local.sw"));
System.out.println("remote.sw max: "+measureMax(data));
System.out.println("local.sw max: "+measureMax(data2));
byte[] data3=sum(data, data2);
UtilFile.saveAsFile(new File(folder, "rawmic.sw"), data3);
AudioFormat format=ManualTestEchoCancel.getFormat();
final Mixer mixer = AudioSystem.getMixer(null);
Play p=new Play(mixer, format, ManualTestEchoCancel.frameSamples)
{
@Override
protected void switchBuffer() {
if(getSample()==data)
{
setSample(data2);
}else if(getSample()==data2)
{
setSample(data3);
}
}
};
p.start();
p.setSample(data);
}
Microphone.java source code (project: BrainControl)
@Override
public void newProperties(PropertySheet ps) throws PropertyException {
super.newProperties(ps);
logger = ps.getLogger();
sampleRate = ps.getInt(PROP_SAMPLE_RATE);
int sampleSizeInBits = ps.getInt(PROP_BITS_PER_SAMPLE);
int channels = ps.getInt(PROP_CHANNELS);
bigEndian = ps.getBoolean(PROP_BIG_ENDIAN);
signed = ps.getBoolean(PROP_SIGNED);
desiredFormat = new AudioFormat((float) sampleRate, sampleSizeInBits, channels, signed, bigEndian);
closeBetweenUtterances = ps.getBoolean(PROP_CLOSE_BETWEEN_UTTERANCES);
msecPerRead = ps.getInt(PROP_MSEC_PER_READ);
keepDataReference = ps.getBoolean(PROP_KEEP_LAST_AUDIO);
stereoToMono = ps.getString(PROP_STEREO_TO_MONO);
selectedChannel = ps.getInt(PROP_SELECT_CHANNEL);
selectedMixerIndex = ps.getString(PROP_SELECT_MIXER);
audioBufferSize = ps.getInt(PROP_BUFFER_SIZE);
}
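The desiredFormat assembled here is what the microphone's data line is later opened with; a stand-alone sketch of that pattern with assumed values (not the Sphinx property defaults):

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.TargetDataLine;

public class CaptureSketch {
    public static void main(String[] args) throws Exception {
        // 16 kHz, 16-bit, mono, signed, little-endian: a common speech capture format
        AudioFormat desiredFormat = new AudioFormat(16000f, 16, 1, true, false);
        TargetDataLine line = AudioSystem.getTargetDataLine(desiredFormat);
        line.open(desiredFormat);
        line.start();
        byte[] buf = new byte[3200]; // 100 ms of audio at this format
        int read = line.read(buf, 0, buf.length);
        System.out.println("captured " + read + " bytes");
        line.close();
    }
}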
AudioFloatFormatConverter.java source code (project: OpenJSharp)
public AudioInputStream getAudioInputStream(AudioFormat targetFormat,
AudioFloatInputStream sourceStream) {
if (!isConversionSupported(targetFormat, sourceStream.getFormat()))
throw new IllegalArgumentException("Unsupported conversion: "
+ sourceStream.getFormat().toString() + " to "
+ targetFormat.toString());
if (targetFormat.getChannels() != sourceStream.getFormat()
.getChannels())
sourceStream = new AudioFloatInputStreamChannelMixer(sourceStream,
targetFormat.getChannels());
if (Math.abs(targetFormat.getSampleRate()
- sourceStream.getFormat().getSampleRate()) > 0.000001)
sourceStream = new AudioFloatInputStreamResampler(sourceStream,
targetFormat);
return new AudioInputStream(new AudioFloatFormatConverterInputStream(
targetFormat, sourceStream), targetFormat, sourceStream
.getFrameLength());
}
PCMtoPCMCodec.java source code (project: openjdk-jdk10)
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
Objects.requireNonNull(targetEncoding);
// filter out targetEncoding from the old getOutputFormats( sourceFormat ) method
AudioFormat[] formats = getOutputFormats( sourceFormat );
Vector<AudioFormat> newFormats = new Vector<>();
for(int i=0; i<formats.length; i++ ) {
if( formats[i].getEncoding().equals( targetEncoding ) ) {
newFormats.addElement( formats[i] );
}
}
AudioFormat[] formatArray = new AudioFormat[newFormats.size()];
for (int i = 0; i < formatArray.length; i++) {
formatArray[i] = newFormats.elementAt(i);
}
return formatArray;
}
WaveFloatFileWriter.java source code (project: OpenJSharp)
public void write(AudioInputStream stream, RIFFWriter writer)
throws IOException {
RIFFWriter fmt_chunk = writer.writeChunk("fmt ");
AudioFormat format = stream.getFormat();
fmt_chunk.writeUnsignedShort(3); // WAVE_FORMAT_IEEE_FLOAT
fmt_chunk.writeUnsignedShort(format.getChannels());
fmt_chunk.writeUnsignedInt((int) format.getSampleRate());
fmt_chunk.writeUnsignedInt(((int) format.getFrameRate())
* format.getFrameSize());
fmt_chunk.writeUnsignedShort(format.getFrameSize());
fmt_chunk.writeUnsignedShort(format.getSampleSizeInBits());
fmt_chunk.close();
RIFFWriter data_chunk = writer.writeChunk("data");
byte[] buff = new byte[1024];
int len;
while ((len = stream.read(buff, 0, buff.length)) != -1)
data_chunk.write(buff, 0, len);
data_chunk.close();
}
NoteOverFlowTest.java source code (project: openjdk-jdk10)
public static void main(String[] args) throws Exception
{
AudioSynthesizer synth = new SoftSynthesizer();
AudioFormat format = new AudioFormat(44100, 16, 2, true, false);
AudioInputStream stream = synth.openStream(format, null);
// Make all voices busy, e.g.
// send midi on and midi off on all available voices
MidiChannel ch1 = synth.getChannels()[0];
ch1.programChange(48); // Use a continuous instrument, e.g. string ensemble
for (int i = 0; i < synth.getMaxPolyphony(); i++) {
ch1.noteOn(64, 64);
ch1.noteOff(64);
}
// Now send single midi on, and midi off message
ch1.noteOn(64, 64);
ch1.noteOff(64);
// Read 20 seconds from the stream; by then all voices should be inactive
stream.skip(format.getFrameSize() * ((int)(format.getFrameRate() * 20)));
// If no voices are active, this test passes
VoiceStatus[] v = synth.getVoiceStatus();
for (int i = 0; i < v.length; i++) {
if(v[i].active)
{
throw new RuntimeException("Not all voices are inactive!");
}
}
// Close the synthesizer after use
synth.close();
}
SoftMixingDataLine.java source code (project: OpenJSharp)
public AudioFloatInputStreamResampler(AudioFloatInputStream ais,
AudioFormat format) {
this.ais = ais;
AudioFormat sourceFormat = ais.getFormat();
targetFormat = new AudioFormat(sourceFormat.getEncoding(), format
.getSampleRate(), sourceFormat.getSampleSizeInBits(),
sourceFormat.getChannels(), sourceFormat.getFrameSize(),
format.getSampleRate(), sourceFormat.isBigEndian());
nrofchannels = targetFormat.getChannels();
Object interpolation = format.getProperty("interpolation");
if (interpolation != null && (interpolation instanceof String)) {
String resamplerType = (String) interpolation;
if (resamplerType.equalsIgnoreCase("point"))
this.resampler = new SoftPointResampler();
if (resamplerType.equalsIgnoreCase("linear"))
this.resampler = new SoftLinearResampler2();
if (resamplerType.equalsIgnoreCase("linear1"))
this.resampler = new SoftLinearResampler();
if (resamplerType.equalsIgnoreCase("linear2"))
this.resampler = new SoftLinearResampler2();
if (resamplerType.equalsIgnoreCase("cubic"))
this.resampler = new SoftCubicResampler();
if (resamplerType.equalsIgnoreCase("lanczos"))
this.resampler = new SoftLanczosResampler();
if (resamplerType.equalsIgnoreCase("sinc"))
this.resampler = new SoftSincResampler();
}
if (resampler == null)
    resampler = new SoftLinearResampler2();
pitch[0] = sourceFormat.getSampleRate() / format.getSampleRate();
pad = resampler.getPadding();
pad2 = pad * 2;
ibuffer = new float[nrofchannels][buffer_len + pad2];
ibuffer2 = new float[nrofchannels * buffer_len];
ibuffer_index = buffer_len + pad;
ibuffer_len = buffer_len;
}
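The "interpolation" lookup above reads an AudioFormat property; a sketch of how a caller could pass that hint when constructing a target format (the property key comes from the code above, the remaining values are assumptions):

import java.util.HashMap;
import java.util.Map;
import javax.sound.sampled.AudioFormat;

public class InterpolationHint {
    public static void main(String[] args) {
        Map<String, Object> props = new HashMap<>();
        props.put("interpolation", "lanczos"); // one of the names checked by the resampler above
        AudioFormat target = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                48000f, 16, 2, 4, 48000f, false, props);
        System.out.println(target.properties());
    }
}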
RecognizeHugeWaveFiles.java source code (project: openjdk-jdk10)
/**
* Tests the {@code AudioFileFormat} fetched from the fake header.
* <p>
 * Note that the frameLength and byteLength are stored as int, which means
 * that {@code AudioFileFormat} reports values above {@code Integer.MAX_VALUE}
 * as NOT_SPECIFIED.
*/
private static void testAFF(final byte[] type, final int rate,
final int channel, final long size)
throws Exception {
final byte[] header = createHeader(type, rate, channel, size);
final ByteArrayInputStream fake = new ByteArrayInputStream(header);
final AudioFileFormat aff = AudioSystem.getAudioFileFormat(fake);
final AudioFormat format = aff.getFormat();
if (aff.getType() != AudioFileFormat.Type.WAVE) {
throw new RuntimeException("Error");
}
final long frameLength = size / format.getFrameSize();
if (frameLength <= Integer.MAX_VALUE) {
if (aff.getFrameLength() != frameLength) {
System.err.println("Expected: " + frameLength);
System.err.println("Actual: " + aff.getFrameLength());
throw new RuntimeException();
}
} else {
if (aff.getFrameLength() != AudioSystem.NOT_SPECIFIED) {
System.err.println("Expected: " + AudioSystem.NOT_SPECIFIED);
System.err.println("Actual: " + aff.getFrameLength());
throw new RuntimeException();
}
}
validateFormat(type[1], rate, channel, aff.getFormat());
}
UlawCodec.java source code (project: openjdk-jdk10)
@Override
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
Objects.requireNonNull(targetEncoding);
Objects.requireNonNull(sourceFormat);
if( (AudioFormat.Encoding.PCM_SIGNED.equals(targetEncoding)
&& AudioFormat.Encoding.ULAW.equals(sourceFormat.getEncoding()))
||
(AudioFormat.Encoding.ULAW.equals(targetEncoding)
&& AudioFormat.Encoding.PCM_SIGNED.equals(sourceFormat.getEncoding()))) {
return getOutputFormats(sourceFormat);
} else {
return new AudioFormat[0];
}
}
FloatSampleBuffer.java source code (project: romanov)
/**
* Writes this sample buffer's audio data to <code>buffer</code> as an
* interleaved byte array. <code>buffer</code> must be large enough to
* hold all data.
*
 * @param readOffset the sample offset in this FloatSampleBuffer from which
 *            samples are read
* @param lenInSamples how many samples are converted
* @param buffer the byte buffer written to
* @param writeOffset the byte offset in buffer
* @throws IllegalArgumentException when buffer is too small or
* <code>format</code> doesn't match
* @return number of bytes written to <code>buffer</code>
*/
public int convertToByteArray(int readOffset, int lenInSamples,
byte[] buffer, int writeOffset, AudioFormat format) {
int byteCount = format.getFrameSize() * lenInSamples;
if (writeOffset + byteCount > buffer.length) {
throw new IllegalArgumentException(
"FloatSampleBuffer.convertToByteArray: buffer too small.");
}
if (format != lastConvertToByteArrayFormat) {
if (format.getSampleRate() != getSampleRate()) {
throw new IllegalArgumentException(
"FloatSampleBuffer.convertToByteArray: different samplerates.");
}
if (format.getChannels() != getChannelCount()) {
throw new IllegalArgumentException(
"FloatSampleBuffer.convertToByteArray: different channel count.");
}
lastConvertToByteArrayFormat = format;
lastConvertToByteArrayFormatCode = FloatSampleTools.getFormatType(format);
}
FloatSampleTools.float2byte(channels, readOffset, buffer, writeOffset,
lenInSamples, lastConvertToByteArrayFormatCode,
format.getChannels(), format.getFrameSize(),
getConvertDitherBits(lastConvertToByteArrayFormatCode));
return byteCount;
}
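A hedged round-trip sketch for convertToByteArray, assuming the FloatSampleBuffer class from these listings and its (channelCount, sampleCount, sampleRate) constructor:

// FloatSampleBuffer is the project class shown above; its package is project-specific.
AudioFormat out = new AudioFormat(44100f, 16, 2, true, false);
FloatSampleBuffer fsb = new FloatSampleBuffer(2, 1024, 44100f); // assumed constructor
byte[] bytes = new byte[out.getFrameSize() * fsb.getSampleCount()];
// Convert every sample, starting at sample 0, into the byte array at offset 0.
int written = fsb.convertToByteArray(0, fsb.getSampleCount(), bytes, 0, out);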
WriteAuUnspecifiedLength.java source code (project: openjdk-jdk10)
public static void main(String argv[]) throws Exception {
AudioFormat format = new AudioFormat(44100, 16, 2, true, true);
InputStream is = new ByteArrayInputStream(new byte[1000]);
AudioInputStream ais = new AudioInputStream(is, format, AudioSystem.NOT_SPECIFIED);
AudioSystem.write(ais, AudioFileFormat.Type.AU, new ByteArrayOutputStream());
System.out.println("Test passed.");
}
PCMtoPCMCodec.java source code (project: openjdk-jdk10)
@Override
public AudioFormat.Encoding[] getTargetEncodings(AudioFormat sourceFormat) {
final int sampleSize = sourceFormat.getSampleSizeInBits();
AudioFormat.Encoding encoding = sourceFormat.getEncoding();
if (sampleSize == 8) {
if (encoding.equals(AudioFormat.Encoding.PCM_SIGNED)) {
return new AudioFormat.Encoding[]{
AudioFormat.Encoding.PCM_UNSIGNED
};
}
if (encoding.equals(AudioFormat.Encoding.PCM_UNSIGNED)) {
return new AudioFormat.Encoding[]{
AudioFormat.Encoding.PCM_SIGNED
};
}
} else if (sampleSize == 16) {
if (encoding.equals(AudioFormat.Encoding.PCM_SIGNED)
|| encoding.equals(AudioFormat.Encoding.PCM_UNSIGNED)) {
return new AudioFormat.Encoding[]{
AudioFormat.Encoding.PCM_UNSIGNED,
AudioFormat.Encoding.PCM_SIGNED
};
}
}
return new AudioFormat.Encoding[0];
}
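The provider method above is what AudioSystem consults; a minimal sketch (the 8-bit unsigned mono source format is an assumption) listing the encodings reachable from it:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class ListTargetEncodings {
    public static void main(String[] args) {
        AudioFormat source = new AudioFormat(8000f, 8, 1, false, false); // 8-bit unsigned PCM
        for (AudioFormat.Encoding enc : AudioSystem.getTargetEncodings(source)) {
            System.out.println(enc);
        }
    }
}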
FloatSampleTools.java source code (project: romanov)
/**
* @see #byte2float(byte[] input, int inByteOffset, Object[] output, int
* outOffset, int frameCount, AudioFormat format, boolean
* allowAddChannel)
*/
public static void byte2float(byte[] input, int inByteOffset,
List<float[]> output, int outOffset, int frameCount,
AudioFormat format) {
byte2float(input, inByteOffset, output, outOffset, frameCount, format,
true);
}
Replay2.java source code (project: rcom)
public static void main(String[] args) throws IOException, LineUnavailableException {
try(Scanner br=new Scanner(System.in))
{
String s=br.nextLine();
byte[] data=UtilFile.loadFile(new File("/tmp/"+s+".sw"));
System.out.println("Playing: "+s);
AudioFormat format=ManualTestEchoCancel.getFormat();
final Mixer mixer = AudioSystem.getMixer(null);
Play p=new Play(mixer, format, ManualTestEchoCancel.frameSamples)
{
};
p.start();
p.setSample(data);
}
}
AlawCodec.java source code (project: openjdk-jdk10)
@Override
public AudioInputStream getAudioInputStream(AudioFormat targetFormat, AudioInputStream sourceStream){
if (!isConversionSupported(targetFormat, sourceStream.getFormat()))
throw new IllegalArgumentException("Unsupported conversion: "
+ sourceStream.getFormat().toString() + " to "
+ targetFormat.toString());
return getConvertedStream( targetFormat, sourceStream );
}
Toolkit.java source code (project: openjdk-jdk10)
static boolean isFullySpecifiedPCMFormat(AudioFormat format) {
if (!format.getEncoding().equals(AudioFormat.Encoding.PCM_SIGNED)
&& !format.getEncoding().equals(AudioFormat.Encoding.PCM_UNSIGNED)) {
return false;
}
if ((format.getFrameRate() <= 0)
|| (format.getSampleRate() <= 0)
|| (format.getSampleSizeInBits() <= 0)
|| (format.getFrameSize() <= 0)
|| (format.getChannels() <= 0)) {
return false;
}
return true;
}
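For illustration, a small sketch (not part of Toolkit) constructing one format this check would accept and one it would reject because of NOT_SPECIFIED fields:

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;

public class FormatSpecCheck {
    public static void main(String[] args) {
        // Fully specified: every rate and size field is positive
        AudioFormat full = new AudioFormat(44100f, 16, 2, true, false);
        // Under-specified: sample size and frame size left as NOT_SPECIFIED (-1)
        AudioFormat partial = new AudioFormat(AudioFormat.Encoding.PCM_SIGNED,
                44100f, AudioSystem.NOT_SPECIFIED, 2,
                AudioSystem.NOT_SPECIFIED, 44100f, false);
        System.out.println(full);
        System.out.println(partial);
    }
}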
JSMinim.java source code (project: romanov)
public AudioSample getAudioSample(float[] left, float[] right, AudioFormat format, int bufferSize)
{
FloatSampleBuffer sample = new FloatSampleBuffer(2, left.length, format.getSampleRate());
System.arraycopy(left, 0, sample.getChannel(0), 0, left.length);
System.arraycopy(right, 0, sample.getChannel(1), 0, right.length);
return getAudioSampleImp(sample, format, bufferSize);
}
UlawCodec.java source code (project: jdk8u-jdk)
/**
 * Obtains the set of target formats with the given encoding that this codec
 * can convert the given source format to.
 */
public AudioFormat[] getTargetFormats(AudioFormat.Encoding targetEncoding, AudioFormat sourceFormat){
if( (AudioFormat.Encoding.PCM_SIGNED.equals(targetEncoding)
&& AudioFormat.Encoding.ULAW.equals(sourceFormat.getEncoding()))
||
(AudioFormat.Encoding.ULAW.equals(targetEncoding)
&& AudioFormat.Encoding.PCM_SIGNED.equals(sourceFormat.getEncoding()))) {
return getOutputFormats(sourceFormat);
} else {
return new AudioFormat[0];
}
}