/* A buffer supporting the playback of audio data and the
 writing of audio data to a sound file.

 Copyright (c) 2000-2014 The Regents of the University of California.
 All rights reserved.
 Permission is hereby granted, without written agreement and without
 license or royalty fees, to use, copy, modify, and distribute this
 software and its documentation for any purpose, provided that the above
 copyright notice and the following two paragraphs appear in all copies
 of this software.

 IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
 FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
 THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
 SUCH DAMAGE.

 THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
 INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE
 PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF
 CALIFORNIA HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,
 ENHANCEMENTS, OR MODIFICATIONS.
024 025 PT_COPYRIGHT_VERSION_2 026 COPYRIGHTENDKEY 027 028 */ 029package ptolemy.media.javasound; 030 031import java.io.ByteArrayInputStream; 032import java.io.File; 033import java.io.IOException; 034import java.util.ArrayList; 035import java.util.StringTokenizer; 036 037import javax.sound.sampled.AudioFileFormat; 038import javax.sound.sampled.AudioFormat; 039import javax.sound.sampled.AudioInputStream; 040import javax.sound.sampled.AudioSystem; 041import javax.sound.sampled.DataLine; 042import javax.sound.sampled.LineUnavailableException; 043import javax.sound.sampled.SourceDataLine; 044 045/////////////////////////////////////////////////////////////////// 046//// SoundPlayback 047 048/** 049 A buffer supporting the playback of audio data and the the 050 writing of audio data to a sound file. 051 052 <h2>Overview</h2> 053 A buffer supporting the real-time playback of audio and the writing 054 of audio data to a sound file. Single channel 055 (mono) and multichannel audio (stereo) are supported. This class, 056 along with SoundCapture, intends to provide an easy to use interface 057 to Java Sound, Java's audio API. Java Sound supports the writing 058 of audio data to a sound file or the computer's audio output port, 059 but only at the byte level, which is audio format specific. This class, 060 however, provides higher level support for the writing of double 061 or integer valued samples to the computer's audio output port or 062 any supported sound file type. This class is therefore useful when 063 it one desires to playback audio samples in an audio format independent 064 way. 065 <p> 066 Depending on available system resources, it may be possible to 067 run an instance of this class and an instance of SoundCapture 068 concurrently. This allows for the concurrent capture, signal 069 processing, and playback of audio data. 070 <p> 071 <h2>Usage</h2> 072 Two constructors are provided. 
One constructor creates a sound playback 073 object that sends audio data to the speaker. If this constructor is 074 used, there will be a small 075 delay between the time that the audio data is delivered to this 076 object and the time that the corresponding audio is actually 077 heard. This latency can be adjusted by setting the <i>bufferSize</i> 078 constructor parameter. Another constructor 079 creates a sound playback object that sends audio data to a sound 080 file. 081 <p> 082 After calling the appropriate constructor, startPlayback() 083 must be called to initialize the audio system. 084 The putSamples() or putSamplesInt() method should then be repeatedly 085 called to deliver the audio data to the audio output device 086 (speaker or file). The audio samples delivered to putSamples() 087 should be in the proper range, or clipping will occur. 088 putSamples() expects the samples to be in the range (-1, 1). 089 putSamplesInt() expects the samples to be in the range 090 (-2^(bits_per_sample/2), 2^(bits_per_sample/2)), where 091 bits_per_sample is the number of bits per sample. 092 Note that it is possible (but probably 093 not useful) to interleave calls to putSamples() and 094 putSamplesInt(). 095 Finally, after no more audio playback is desired, stopPlayback() 096 should be called to free up audio system resources. 097 <p> 098 <h2>Security issues</h2>Applications have no restrictions on the 099 capturing or playback of audio. Applet code is not allowed to 100 write native files by default. The .java.policy file must be 101 modified to grant applets more privileges. 102 <p> 103 Note: Requires Java 2 v1.3.0 or later. 104 @author Brian K. Vogel 105 @version $Id$ 106 @since Ptolemy II 1.0 107 @Pt.ProposedRating Yellow (vogel) 108 @Pt.AcceptedRating Yellow (cxh) 109 @see ptolemy.media.javasound.SoundCapture 110 */ 111public class SoundPlayback { 112 /** Construct a sound playback object that plays audio through the 113 * computer's speaker. 
Note 114 * that when this constructor is used, putSamples() should be 115 * called often enough to prevent underflow of the internal audio 116 * input buffer. 117 * @param sampleRate Sample rate in Hz. Must be in the range: 8000 118 * to 48000. 119 * @param sampleSizeInBits Number of bits per sample (valid choices are 120 * 8 or 16). 121 * @param channels Number of audio channels. 1 for mono, 2 for 122 * stereo, etc. 123 * @param bufferSize Requested size of the internal audio input 124 * buffer in samples. This controls the latency (delay from 125 * the time putSamples() is called until the audio is 126 * actually heard). A lower bound on the latency is given by 127 * (<i>bufferSize</i> / <i>sampleRate</i>) seconds. 128 * Ideally, the 129 * smallest value that gives acceptable performance (no underflow) 130 * should be used. Typical values are about 1/10 th the sample 131 * rate. For example, at 44100 Hz sample rate, a typical buffer 132 * size value might be 4410. 133 * @param putSamplesSize Size of the array parameter of 134 * putSamples(). For performance reasons, the size should 135 * be chosen smaller than <i>bufferSize</i>. Typical values 136 * are 1/2 to 1/16 th of <i>bufferSize</i>. 137 */ 138 public SoundPlayback(float sampleRate, int sampleSizeInBits, int channels, 139 int bufferSize, int putSamplesSize) { 140 _isAudioPlaybackActive = false; 141 142 // Set mode to real-time. 143 this._playbackMode = "speaker"; 144 this._sampleSizeInBits = sampleSizeInBits; 145 this._sampleRate = sampleRate; 146 this._channels = channels; 147 this._bufferSize = bufferSize; 148 this._putSamplesSize = putSamplesSize; 149 } 150 151 /** Construct a sound playback object that writes audio to 152 * a sound file with the specified name. Valid sound file 153 * formats are WAVE (.wav), AIFF (.aif, .aiff), AU (.au). The file 154 * format is automatically determined from the file extension. 155 * The sound file will be initialized when startPlayback() is 156 * called. 
If there is a problem creating the sound file, an 157 * IOException will be thrown in startPlayback(). 158 * Thereafter, each call to putSamples() will add 159 * <i>putSamplesSize</i> samples to the sound file. To 160 * close and save the sound file, call stopPlayback(). 161 * <p> 162 * Note that the audio data will not actually be saved to file, 163 * <i>fileName</i>, until stopPlayback() is called. If an 164 * unknown audio format is used, an exception will be thrown 165 * in stopPlayback(). 166 * @param fileName The file name to create. If the file already 167 * exists, overwrite it. Valid sound file formats are WAVE (.wav), 168 * AIFF (.aif, .aiff), AU (.au). The file format to write is 169 * determined automatically from the file extension. 170 * @param sampleRate Sample rate in Hz. Must be in the range: 8000 171 * to 48000. 172 * @param sampleSizeInBits Number of bits per sample (valid choices are 173 * 8 or 16). 174 * @param channels Number of audio channels. 1 for mono, 2 for 175 * stereo. 176 * @param bufferSize Requested size of the internal audio input 177 * buffer in samples. This controls the latency (delay from 178 * the time putSamples() is called until the audio is 179 * actually heard). A lower bound on the latency is given by 180 * (<i>bufferSize</i> / <i>sampleRate</i>) seconds. 181 * Ideally, the 182 * smallest value that gives acceptable performance (no underflow) 183 * should be used. Typical values are about 1/10 th the sample 184 * rate. For example, at 44100 Hz sample rate, a typical buffer 185 * size value might be 4410. 186 * @param putSamplesSize Size of the array parameter of 187 * putSamples(). There is no restriction on the value of 188 * this parameter, but typical values are 64-2024. 
189 */ 190 public SoundPlayback(String fileName, float sampleRate, 191 int sampleSizeInBits, int channels, int bufferSize, 192 int putSamplesSize) { 193 _isAudioPlaybackActive = false; 194 this._playbackMode = "file"; 195 this._fileName = fileName; 196 this._sampleSizeInBits = sampleSizeInBits; 197 this._sampleRate = sampleRate; 198 this._channels = channels; 199 this._productionRate = putSamplesSize; 200 } 201 202 /////////////////////////////////////////////////////////////////// 203 /// Public Methods /// 204 205 /** Play an array of audio samples. 206 * If the "play audio to speaker" constructor was called, 207 * then queue the array of audio samples in 208 * <i>putSamplesArray</i> for playback. There will be a 209 * latency before the audio data is actually heard, since the 210 * audio data in <i>putSamplesArray</i> is queued to an 211 * internal audio buffer. The size of the internal buffer 212 * is set by the constructor. A lower bound on the latency 213 * is given by (<i>bufferSize</i> / <i>sampleRate</i>) 214 * seconds. If the "play audio to speaker" mode is 215 * used, then this method should be invoked often 216 * enough to prevent underflow of the internal audio buffer. 217 * Underflow is undesirable since it will cause audible gaps 218 * in audio playback, but no exception or error condition will 219 * occur. If the caller attempts to write more data than can 220 * be written, this method blocks until the data can be 221 * written to the internal audio buffer. 222 * <p> 223 * If the "write audio to file" constructor was used, 224 * then append the audio data contained in <i>putSamplesArray</i> 225 * to the sound file specified in the constructor. Note that 226 * underflow cannot occur for this case. 227 * <p> 228 * The samples should be in the range (-1, 1). Samples that are 229 * outside this range will be hard-clipped so that they fall 230 * within this range. 
231 * @param putSamplesArray A two dimensional array containing 232 * the samples to play or write to a file. The first index 233 * represents the channel number (0 for first channel, 1 for 234 * second channel, etc.). The second index represents the 235 * sample index within a channel. For example, 236 * putSamplesArray[n][m] contains the (m+1)th sample 237 * of the (n+1)th channel. putSamplesArray should be a 238 * rectangular array such that putSamplesArray.length() gives 239 * the number of channels and putSamplesArray[n].length() is 240 * equal to <i>putSamplesSize</i>, for all channels n. This 241 * is not actually checked, however. 242 * 243 * @exception IOException If there is a problem playing audio. 244 * @exception IllegalStateException If audio playback is currently 245 * inactive. That is, If startPlayback() has not yet been called 246 * or if stopPlayback() has already been called. 247 */ 248 public void putSamples(double[][] putSamplesArray) 249 throws IOException, IllegalStateException { 250 if (_isAudioPlaybackActive == true) { 251 if (_playbackMode.equals("speaker")) { 252 // Convert array of double valued samples into 253 // the proper byte array format. 254 _data = _doubleArrayToByteArray(putSamplesArray, 255 _bytesPerSample, _channels); 256 257 // Note: _data is a byte array containing data to 258 // be written to the output device. 259 // Note: consumptionRate is amount of data to write, in bytes. 260 // Now write the array to output device. 261 _sourceLine.write(_data, 0, 262 _putSamplesSize * _frameSizeInBytes); 263 } else if (_playbackMode.equals("file")) { 264 // Convert array of double valued samples into 265 // the proper byte array format. 266 _data = _doubleArrayToByteArray(putSamplesArray, 267 _bytesPerSample, _channels); 268 269 // Add new audio data to the file buffer array. 270 for (byte element : _data) { 271 _toFileBuffer.add(Byte.valueOf(element)); 272 } 273 } else { 274 // Should not happen since caught by constructor. 
275 } 276 } else { 277 throw new IllegalStateException("SoundPlayback: " 278 + "putSamples() was called while audio playback was" 279 + " inactive (startPlayback() was never called or " 280 + "stopPlayback has already been called)."); 281 } 282 } 283 284 /** Play an array of audio samples. 285 * If the "play audio to speaker" constructor was called, 286 * then queue the array of audio samples in 287 * <i>putSamplesArray</i> for playback. The samples should be 288 * in the range (-2^(bits_per_sample/2), 2^(bits_per_sample/2)). 289 * There will be a latency before 290 * the audio data is actually heard, since the 291 * audio data in <i>putSamplesArray</i> is queued to an 292 * internal audio buffer. The size of the internal buffer 293 * is set by the constructor. A lower bound on the latency 294 * is given by (<i>bufferSize</i> / <i>sampleRate</i>) 295 * seconds. If the "play audio to speaker" mode is 296 * used, then this method should be invoked often 297 * enough to prevent underflow of the internal audio buffer. 298 * <p> 299 * If the "write audio to file" constructor was used, 300 * then append the audio data contained in <i>putSamplesArray</i> 301 * to the sound file specified in the constructor. 302 * <p> 303 * The samples should be in the range 304 * (-2^(bits_per_sample/2), 2^(bits_per_sample/2)). Samples 305 * that are outside this range will be hard-clipped. 306 * @param putSamplesArray A two dimensional array containing 307 * the samples to play or write to a file. The first index 308 * represents the channel number (0 for first channel, 1 for 309 * second channel, etc.). The second index represents the 310 * sample index within a channel. For example, 311 * putSamplesArray[n][m] contains the (m+1)th sample 312 * of the (n+1)th channel. putSamplesArray should be a 313 * rectangular array such that putSamplesArray.length() gives 314 * the number of channels and putSamplesArray[n].length() is 315 * equal to <i>putSamplesSize</i>, for all channels n. 
This 316 * is not actually checked, however. 317 * 318 * @exception IOException If there is a problem playing audio. 319 * @exception IllegalStateException If audio playback is currently 320 * inactive. That is, If startPlayback() has not yet been called 321 * or if stopPlayback() has already been called. 322 */ 323 public void putSamplesInt(int[][] putSamplesArray) 324 throws IOException, IllegalStateException { 325 if (_isAudioPlaybackActive == true) { 326 if (_playbackMode.equals("speaker")) { 327 // Convert array of double valued samples into 328 // the proper byte array format. 329 _data = _intArrayToByteArray(putSamplesArray, _bytesPerSample, 330 _channels); 331 332 // Note: _data is a byte array containing data to 333 // be written to the output device. 334 // Note: consumptionRate is amount of data to write, in bytes. 335 // Now write the array to output device. 336 _sourceLine.write(_data, 0, 337 _putSamplesSize * _frameSizeInBytes); 338 } else if (_playbackMode.equals("file")) { 339 // Convert array of double valued samples into 340 // the proper byte array format. 341 _data = _intArrayToByteArray(putSamplesArray, _bytesPerSample, 342 _channels); 343 344 // Add new audio data to the file buffer array. 345 for (byte element : _data) { 346 _toFileBuffer.add(Byte.valueOf(element)); 347 } 348 } else { 349 // Should not happen since caught by constructor. 350 } 351 } else { 352 throw new IllegalStateException("SoundPlayback: " 353 + "putSamples() was called while audio playback was" 354 + " inactive (startPlayback() was never called or " 355 + "stopPlayback has already been called)."); 356 } 357 } 358 359 /** Perform initialization for the playback of audio data. 360 * This method must be invoked prior 361 * to the first invocation of putSamples(). This method 362 * must not be called more than once between invocations of 363 * stopPlayback(), or an exception will be thrown. 
364 * 365 * @exception IOException If there is a problem setting up 366 * the system for audio playback. This will occur if 367 * a file cannot be opened or if the audio out port cannot 368 * be accessed. 369 * @exception IllegalStateException If this method is called 370 * more than once between invocations of stopCapture(). 371 */ 372 public void startPlayback() throws IOException, IllegalStateException { 373 if (_isAudioPlaybackActive == false) { 374 if (_playbackMode.equals("speaker")) { 375 // Real time playback to speaker. 376 _startPlaybackRealTime(); 377 } else if (_playbackMode.equals("file")) { 378 // Record data to sound file. 379 _startPlaybackToFile(); 380 } else { 381 throw new IOException("SoundPlayback: " 382 + "startPlayback(): unknown playback mode: " 383 + _playbackMode); 384 } 385 386 _bytesPerSample = _sampleSizeInBits / 8; 387 _isAudioPlaybackActive = true; 388 } else { 389 throw new IllegalStateException("SoundPlayback: " 390 + "startPlayback() was called while audio playback was" 391 + " already active (startPlayback() was called " 392 + "more than once between invocations of stopPlayback())."); 393 } 394 } 395 396 /** Stop playing/writing audio. This method should be called when 397 * no more calls to putSamples(). are required, so 398 * that the system resources involved in the audio playback 399 * may be freed. 400 * <p> 401 * If the "write audio data to file" constructor was used, then 402 * the sound file specified by the constructor is saved and 403 * closed. 404 * 405 * @exception IOException If there is a problem closing the 406 * audio resources, or if the "write audio data 407 * to file" constructor was used and the sound file has an 408 * unsupported format. 409 */ 410 public void stopPlayback() throws IOException { 411 if (_isAudioPlaybackActive == true) { 412 if (_playbackMode.equals("speaker")) { 413 // Stop real-time playback to speaker. 
414 if (_sourceLine != null) { 415 _sourceLine.drain(); 416 _sourceLine.stop(); 417 _sourceLine.close(); 418 } 419 420 _sourceLine = null; 421 } else if (_playbackMode.equals("file")) { 422 // Record data to sound file. 423 _stopPlaybackToFile(); 424 } else { 425 // Should not happen. 426 } 427 } 428 429 _isAudioPlaybackActive = false; 430 } 431 432 /////////////////////////////////////////////////////////////////// 433 //// private methods //// 434 private void _startPlaybackRealTime() throws IOException { 435 boolean signed = true; 436 boolean bigEndian = true; 437 438 AudioFormat format = new AudioFormat(_sampleRate, _sampleSizeInBits, 439 _channels, signed, bigEndian); 440 441 _frameSizeInBytes = format.getFrameSize(); 442 443 DataLine.Info sourceInfo = new DataLine.Info(SourceDataLine.class, 444 format, AudioSystem.NOT_SPECIFIED); 445 446 // get and open the source data line for playback. 447 try { 448 // Source DataLinet is really a target for 449 // audio data, not a source. 450 _sourceLine = (SourceDataLine) AudioSystem.getLine(sourceInfo); 451 452 // Open line and suggest a buffer size (in bytes) to use or 453 // the internal audio buffer. 454 _sourceLine.open(format, _bufferSize * _frameSizeInBytes); 455 } catch (LineUnavailableException ex) { 456 throw new IOException("Unable to open the line for " 457 + "real-time audio playback: " + ex); 458 } 459 460 // Array of audio samples in byte format. 461 _data = new byte[_productionRate * _frameSizeInBytes * _channels]; 462 463 // Start the source data line 464 _sourceLine.start(); 465 } 466 467 private void _startPlaybackToFile() { 468 // FIXME: Performance is not great when the incoming audio 469 // samples are being captured in real-time, possibly 470 // due to resizing of the ArrayList. 471 // 472 // Array to hold all data to be saved to file. Grows 473 // as new data are added (via putSamples()). 474 // Each element is a byte of audio data. 
475 _toFileBuffer = new ArrayList(); 476 477 boolean signed = true; 478 boolean bigEndian = true; 479 480 _playToFileFormat = new AudioFormat(_sampleRate, _sampleSizeInBits, 481 _channels, signed, bigEndian); 482 483 _frameSizeInBytes = _playToFileFormat.getFrameSize(); 484 } 485 486 private void _stopPlaybackToFile() throws IOException { 487 int size = _toFileBuffer.size(); 488 byte[] audioBytes = new byte[size]; 489 490 for (int i = 0; i < size; i++) { 491 Byte j = (Byte) _toFileBuffer.get(i); 492 audioBytes[i] = j.byteValue(); 493 } 494 495 ByteArrayInputStream byteInputArrayStream = null; 496 AudioInputStream audioInputStream = null; 497 498 try { 499 byteInputArrayStream = new ByteArrayInputStream(audioBytes); 500 501 audioInputStream = new AudioInputStream(byteInputArrayStream, 502 _playToFileFormat, audioBytes.length / _frameSizeInBytes); 503 504 File outFile = new File(_fileName); 505 506 StringTokenizer st = new StringTokenizer(_fileName, "."); 507 508 // Do error checking: 509 if (st.countTokens() != 2) { 510 throw new IOException("Error: Incorrect " + "file name format. " 511 + "Format: filename.extension"); 512 } 513 514 st.nextToken(); // Advance to the file extension. 515 516 String fileExtension = st.nextToken(); 517 518 if (fileExtension.equalsIgnoreCase("au")) { 519 // Save the file. 520 AudioSystem.write(audioInputStream, AudioFileFormat.Type.AU, 521 outFile); 522 } else if (fileExtension.equalsIgnoreCase("aiff")) { 523 // Save the file. 524 AudioSystem.write(audioInputStream, AudioFileFormat.Type.AIFF, 525 outFile); 526 } else if (fileExtension.equalsIgnoreCase("wave")) { 527 // Save the file. 528 AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, 529 outFile); 530 } else if (fileExtension.equalsIgnoreCase("wav")) { 531 // Save the file. 532 AudioSystem.write(audioInputStream, AudioFileFormat.Type.WAVE, 533 outFile); 534 } else if (fileExtension.equalsIgnoreCase("aifc")) { 535 // Save the file. 
536 AudioSystem.write(audioInputStream, AudioFileFormat.Type.AIFC, 537 outFile); 538 } else { 539 throw new IOException("Error saving " 540 + "file: Unknown file format: " + fileExtension); 541 } 542 } catch (IOException e) { 543 throw new IOException( 544 "SoundPlayback: error saving" + " file: " + e); 545 } finally { 546 if (byteInputArrayStream != null) { 547 try { 548 byteInputArrayStream.close(); 549 } catch (Throwable throwable) { 550 System.out.println("Ignoring failure to close stream " 551 + "on " + audioBytes.length + " bytes of data."); 552 throwable.printStackTrace(); 553 } 554 } 555 556 if (audioInputStream != null) { 557 try { 558 audioInputStream.close(); 559 } catch (Throwable throwable) { 560 System.out.println("Ignoring failure to close stream " 561 + "on " + audioBytes.length + " bytes of data."); 562 throwable.printStackTrace(); 563 } 564 } 565 } 566 } 567 568 /* Convert a double array of audio samples into a byte array of 569 * audio samples in linear signed pcm big endian format. The 570 * samples contained in <i>doubleArray</i> should be in the 571 * range (-1, 1). Samples outside this range will be hard clipped 572 * to the range (-1, 1). 573 * @param doubleArray Two dimensional array holding audio samples. 574 * For each channel, m, doubleArray[m] is a single dimensional 575 * array containing samples for channel m. 576 * @param bytesPerSample Number of bytes per sample. Supported 577 * bytes per sample by this method are 8, 16, 24, 32. 578 * @param channels Number of audio channels. 579 * @return The linear signed pcm big endian byte array formatted 580 * array representation of <i>doubleArray</i>. The length of 581 * the returned array is (doubleArray.length*bytesPerSample*channels). 582 */ 583 private byte[] _doubleArrayToByteArray(double[][] doubleArray, 584 int bytesPerSample, int channels) { 585 // All channels had better have the same number 586 // of samples! This is not checked! 
587 int lengthInSamples = doubleArray[0].length; 588 589 //double maxSample = Math.pow(2, 8 * bytesPerSample - 1); 590 // Could use above line, but hopefully, code below will 591 // be faster. 592 double maxSample; 593 double maxDoubleValuedSample; 594 595 if (bytesPerSample == 2) { 596 maxSample = 32768; 597 } else if (bytesPerSample == 1) { 598 maxSample = 128; 599 } else if (bytesPerSample == 3) { 600 maxSample = 8388608; 601 } else if (bytesPerSample == 4) { 602 maxSample = 147483648e9; 603 } else { 604 // Should not happen. 605 maxSample = 0; 606 } 607 608 maxDoubleValuedSample = (maxSample - 2) / maxSample; 609 610 byte[] byteArray = new byte[lengthInSamples * bytesPerSample 611 * channels]; 612 byte[] b = new byte[bytesPerSample]; 613 614 for (int currSamp = 0; currSamp < lengthInSamples; currSamp++) { 615 int l; 616 617 // For each channel, 618 for (int currChannel = 0; currChannel < channels; currChannel++) { 619 // Perform clipping, if necessary. 620 if (doubleArray[currChannel][currSamp] >= maxDoubleValuedSample) { 621 l = (int) maxSample - 2; 622 } else if (doubleArray[currChannel][currSamp] <= -maxDoubleValuedSample) { 623 l = (int) -maxSample + 2; 624 } else { 625 // signed integer representation of current sample of the 626 // current channel. 627 l = (int) (doubleArray[currChannel][currSamp] * maxSample); 628 } 629 630 // Create byte representation of current sample. 631 for (int i = 0; i < bytesPerSample; i += 1, l >>= 8) { 632 b[bytesPerSample - i - 1] = (byte) l; 633 } 634 635 // Copy the byte representation of current sample to 636 // the linear signed pcm big endian formatted byte array. 637 for (int i = 0; i < bytesPerSample; i += 1) { 638 byteArray[currSamp * bytesPerSample * channels 639 + bytesPerSample * currChannel + i] = b[i]; 640 } 641 } 642 } 643 644 return byteArray; 645 } 646 647 /* Convert a integer array of audio samples into a byte array of 648 * audio samples in linear signed pcm big endian format. 
649 * The samples contained by <i>intArray</i> should be in the range 650 * (-2^(bits_per_sample/2), 2^(bits_per_sample/2)). Samples that 651 * are outside this range will be hard-clipped to fall within this 652 * range. 653 * @param intArray Two dimensional array holding audio samples. 654 * For each channel, m, doubleArray[m] is a single dimensional 655 * array containing samples for channel m. 656 * @param bytesPerSample Number of bytes per sample. Supported 657 * bytes per sample by this method are 8, 16, 24, 32. 658 * @param channels Number of audio channels. 659 * @return The linear signed pcm big endian byte array formatted 660 * array representation of <i>doubleArray</i>. The length of 661 * the returned array is (doubleArray.length*bytesPerSample*channels). 662 */ 663 private byte[] _intArrayToByteArray(int[][] intArray, int bytesPerSample, 664 int channels) { 665 // All channels had better have the same number 666 // of samples! This is not checked! 667 int lengthInSamples = intArray[0].length; 668 669 byte[] byteArray = new byte[lengthInSamples * bytesPerSample 670 * channels]; 671 byte[] b = new byte[bytesPerSample]; 672 673 for (int currSamp = 0; currSamp < lengthInSamples; currSamp++) { 674 // For each channel, 675 for (int currChannel = 0; currChannel < channels; currChannel++) { 676 // signed integer representation of current sample of the 677 // current channel. 678 int l = intArray[currChannel][currSamp]; 679 680 // Perform clipping, if necessary. 681 int maxSample; 682 683 if (bytesPerSample == 2) { 684 maxSample = 32768; 685 } else if (bytesPerSample == 1) { 686 maxSample = 128; 687 } else if (bytesPerSample == 3) { 688 maxSample = 8388608; 689 } else if (bytesPerSample == 4) { 690 maxSample = 1474836480; 691 } else { 692 // Should not happen. 693 maxSample = 0; 694 } 695 696 if (l > maxSample - 1) { 697 l = maxSample - 1; 698 } else if (l < -maxSample + 1) { 699 l = -maxSample + 1; 700 } 701 702 // Create byte representation of current sample. 
703 for (int i = 0; i < bytesPerSample; i += 1, l >>= 8) { 704 b[bytesPerSample - i - 1] = (byte) l; 705 } 706 707 // Copy the byte representation of current sample to 708 // the linear signed pcm big endian formatted byte array. 709 for (int i = 0; i < bytesPerSample; i += 1) { 710 byteArray[currSamp * bytesPerSample * channels 711 + bytesPerSample * currChannel + i] = b[i]; 712 } 713 } 714 } 715 716 return byteArray; 717 } 718 719 /////////////////////////////////////////////////////////////////// 720 //// private variables //// 721 private int _productionRate; 722 723 private String _fileName; 724 725 private String _playbackMode; 726 727 private int _sampleSizeInBits; 728 729 private int _putSamplesSize; 730 731 private float _sampleRate; 732 733 private int _channels; 734 735 private int _bufferSize; 736 737 private SourceDataLine _sourceLine; 738 739 // Array of audio samples in byte format. 740 private byte[] _data; 741 742 private int _frameSizeInBytes; 743 744 private ArrayList _toFileBuffer; 745 746 // This is the format of _toFileBuffer. 747 private AudioFormat _playToFileFormat; 748 749 private int _bytesPerSample; 750 751 private boolean _isAudioPlaybackActive; 752}