Forum > Cocoa

[Solved] CoreAudio

(1/2) > >>

Key-Real:
It plays with annoying noise.

why?




--- Code: Pascal  [+][-]window.onload = function(){var x1 = document.getElementById("main_content_section"); if (x1) { var x = document.getElementsByClassName("geshi");for (var i = 0; i < x.length; i++) { x[i].style.maxHeight='none'; x[i].style.height = Math.min(x[i].clientHeight+15,306)+'px'; x[i].style.resize = "vertical";}};} ---{$linkframework CoreAudio}{$MODE OBJFPC}uses MacOSAll,CocoaAll,crt;  type  TwavHeader = packed record                        RIFF:array [0..3] of char;        // RIFF Header Magic header            ChunkSize:dword;      // RIFF Chunk Size            WAVE:array [0..3] of char;        // WAVE Header                        fmt:array [0..3] of char;         // FMT header            Subchunk1Size:dword;  // Size of the fmt chunk            AudioFormat:word;    // Audio format 1=PCM,6=mulaw,7=alaw,     257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM            NumOfChan:word;      // Number of channels 1=Mono 2=Sterio            SamplesPerSec:dword;  // Sampling Frequency in Hz            bytesPerSec:dword;    // bytes per second            blockAlign:word;     // 2=16-bit mono, 4=16-bit stereo            bitsPerSample:word;  // Number of bits per sample                        Subchunk2ID:array [0..3] of char; // "data"  string            Subchunk2Size:dword;  // Sampled data length  end;  var  wavFile:file;  wavheader:TwavHeader;  function audioIOProc(inDevice:AudioObjectID;                   const inNow:AudioTimeStamp;                    const inInputData:AudioBufferList;                   const inInputTime:AudioTimeStamp;                    var outOutputData:AudioBufferList;                    const inOutputTime:AudioTimeStamp;                   inClientData:pointer                    ):OSStatus;MWPascal;  var       i:dword;    w:word;    f:single;        p:pointer;         buffer:AudioBuffer;         numToRead:dword;        singleSize:dword;   begin        buffer:=outOutputData.mBuffers[0];            numToRead:=buffer.mDataByteSize div sizeof(single);      getmem(p,numToRead);            blockread(wavFile,p^,numToRead);           for i:=0 to numToRead div 2 do begin            w:=pword(p+i*2)^;            f:=(w / $8000) - 1.0;            psingle(buffer.mData + i*sizeof(single))^:=f;          end;        freemem(p);    result:=noErr;end;   procedure printCoreAudioErrorAndExit(error:OSStatus);begin    if (error <> noErr) then begin        writeln('Error: ', error);        halt;    end;end;  var info:AudioObjectPropertyAddress;err:OSStatus;propertySize:UInt32;defaultOutputDevice:AudioObjectID;procID:AudioDeviceIOProcID; begin       randomize;   assign(wavFile,'output.wav');  reset(wavFile,1);   blockread(wavFile,wavHeader,sizeof(Twavheader));   with wavHeader do begin              writeln(RIFF[0],RIFF[1],RIFF[2],RIFF[3]);        // RIFF Header Magic header    writeln('RIFF Chunk Size ',ChunkSize);    writeln(WAVE[0],WAVE[1],WAVE[2],WAVE[3]);      // WAVE Header                writeln(fmt[0],fmt[1],fmt[2],fmt[3]);         // FMT header     writeln('Size of the fmt chunk ',Subchunk1Size);    writeln('Audio format ',AudioFormat);    // Audio format 1=PCM,6=mulaw,7=alaw,     257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM    writeln('Number of channels ', NumOfChan);      // Number of channels 1=Mono 2=Sterio    writeln('Sampling Frequency in Hz ',SamplesPerSec);  // Sampling Frequency in Hz    writeln('bytes per second ',bytesPerSec);    // bytes per second    writeln('blockAlign ',blockAlign);     // 2=16-bit mono, 4=16-bit stereo    writeln('Number of bits per sample ',bitsPerSample);  // Number of bits per sample                writeln(Subchunk2ID[0],Subchunk2ID[1],Subchunk2ID[2],Subchunk2ID[3]); // "data"  string    writeln('Sampled data length ',Subchunk2Size);  // Sampled data length  end;            info.mSelector := kAudioHardwarePropertyDefaultOutputDevice;        info.mElement := kAudioObjectPropertyElementMaster;        info.mScope := kAudioObjectPropertyScopeGlobal;                      err := AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, info, 0, nil, propertySize);    printCoreAudioErrorAndExit(err);         err:= AudioObjectGetPropertyData(kAudioObjectSystemObject, info, 0, nil, propertySize, @defaultOutputDevice);    printCoreAudioErrorAndExit(err);         err := AudioDeviceCreateIOProcID(defaultOutputDevice, @audioIOProc, nil, procID);    printCoreAudioErrorAndExit(err);        err := AudioDeviceStart(defaultOutputDevice, procID);    printCoreAudioErrorAndExit(err);        writeln('Press ENTER to stop.');    readln;     err := AudioDeviceStop(defaultOutputDevice, nil);    printCoreAudioErrorAndExit(err);        err := AudioDeviceDestroyIOProcID(defaultOutputDevice, procID);    printCoreAudioErrorAndExit(err);     close(wavFile);end. 
my .wav is:

--- Code: Pascal  [+][-]window.onload = function(){var x1 = document.getElementById("main_content_section"); if (x1) { var x = document.getElementsByClassName("geshi");for (var i = 0; i < x.length; i++) { x[i].style.maxHeight='none'; x[i].style.height = Math.min(x[i].clientHeight+15,306)+'px'; x[i].style.resize = "vertical";}};} ---RIFFRIFF Chunk Size 16449624WAVEfmt Size of the fmt chunk 16Audio format 1Number of channels 2Sampling Frequency in Hz 44100bytes per second 176400blockAlign 4Number of bits per sample 16dataSampled data length 16449580 

Thaddy:
Yes. Because you do not have the corrects sampling rate.....

Key-Real:
This time I setuped an other test:


--- Code: Pascal  [+][-]window.onload = function(){var x1 = document.getElementById("main_content_section"); if (x1) { var x = document.getElementsByClassName("geshi");for (var i = 0; i < x.length; i++) { x[i].style.maxHeight='none'; x[i].style.height = Math.min(x[i].clientHeight+15,306)+'px'; x[i].style.resize = "vertical";}};} ---{$linkframework AudioToolbox}{$MODE OBJFPC}uses MacOSAll,CocoaAll,crt; type     TSoundState = packed record        done:boolean;    end;    TwavHeader = packed record                        RIFF:array [0..3] of char;        // RIFF Header Magic header            ChunkSize:dword;      // RIFF Chunk Size            WAVE:array [0..3] of char;        // WAVE Header                        fmt:array [0..3] of char;         // FMT header            Subchunk1Size:dword;  // Size of the fmt chunk            AudioFormat:word;    // Audio format 1=PCM,6=mulaw,7=alaw,     257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM            NumOfChan:word;      // Number of channels 1=Mono 2=Sterio            SamplesPerSec:dword;  // Sampling Frequency in Hz            bytesPerSec:dword;    // bytes per second            blockAlign:word;     // 2=16-bit mono, 4=16-bit stereo            bitsPerSample:word;  // Number of bits per sample                        Subchunk2ID:array [0..3] of char; // "data"  string            Subchunk2Size:dword;  // Sampled data length  end;  var  wavFile:file;  wavheader:TwavHeader;    procedure auCallback(inUserData:pointer;  queue:AudioQueueRef;  buffer:AudioQueueBufferRef); MWPascal;var    SoundState : TSoundState;    p:pointer;    numToRead:dword;    i:dword;    w:word;    f:single; begin        SoundState:=TSoundState(inUserData^);     writeln('write');     buffer^.mAudioDataByteSize := 4098;        numToRead:=buffer^.mAudioDataByteSize div sizeof(single) * 2;      getmem(p,numToRead);            blockread(wavFile,p^,numToRead);           for i:=0 to numToRead div 2 do begin            w:=pword(p+i*2)^;            f:=(w / $8000) - 1;            psingle(buffer^.mAudioData + i*sizeof(single))^:=f;          end;        freemem(p);      AudioQueueEnqueueBuffer(queue, buffer, 0, nil);end; var    auDesc:AudioStreamBasicDescription;    auQueue:AudioQueueRef;    auBuffers:array[0..1] of AudioQueueBufferRef;    soundState:TSoundState;    err:OSStatus;    bufferSize:uint32;begin      assign(wavFile,'unreal.wav');  reset(wavFile,1);   blockread(wavFile,wavHeader,sizeof(Twavheader));   with wavHeader do begin              writeln(RIFF[0],RIFF[1],RIFF[2],RIFF[3]);        // RIFF Header Magic header    writeln('RIFF Chunk Size ',ChunkSize);    writeln(WAVE[0],WAVE[1],WAVE[2],WAVE[3]);      // WAVE Header                writeln(fmt[0],fmt[1],fmt[2],fmt[3]);         // FMT header     writeln('Size of the fmt chunk ',Subchunk1Size);    writeln('Audio format ',AudioFormat);    // Audio format 1=PCM,6=mulaw,7=alaw,     257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM    writeln('Number of channels ', NumOfChan);      // Number of channels 1=Mono 2=Sterio    writeln('Sampling Frequency in Hz ',SamplesPerSec);  // Sampling Frequency in Hz    writeln('bytes per second ',bytesPerSec);    // bytes per second    writeln('blockAlign ',blockAlign);     // 2=16-bit mono, 4=16-bit stereo    writeln('Number of bits per sample ',bitsPerSample);  // Number of bits per sample                writeln(Subchunk2ID[0],Subchunk2ID[1],Subchunk2ID[2],Subchunk2ID[3]); // "data"  string    writeln('Sampled data length ',Subchunk2Size);  // Sampled data length  end;      // stereo 16-bit interleaved linear PCM audio data at 48kHz in SNORM format        auDesc.mSampleRate := 48000;    auDesc.mFormatID := kAudioFormatLinearPCM;    auDesc.mFormatFlags := kLinearPCMFormatFlagIsBigEndian or kLinearPCMFormatFlagIsSignedInteger or kLinearPCMFormatFlagIsPacked;    auDesc.mBytesPerPacket := 4;    auDesc.mFramesPerPacket := 1;    auDesc.mBytesPerFrame := 4;    auDesc.mChannelsPerFrame := 2;    auDesc.mBitsPerChannel := 16;            // our persistent state for sound playback    soundState.done:=false; // 261.6 ~= Middle C frequency        // most of the 0 and nullptr params here are for compressed sound formats etc.    err := AudioQueueNewOutput(auDesc, @auCallback, @soundState, nil, nil, 0, auQueue);        if err=0 then begin        // generate buffers holding at most 1/16th of a second of data        bufferSize := round(auDesc.mBytesPerFrame * (auDesc.mSampleRate / 16));        err := AudioQueueAllocateBuffer(auQueue, bufferSize, auBuffers[0]);         if err=0 then begin            err := AudioQueueAllocateBuffer(auQueue, bufferSize, auBuffers[1]);             if err=0 then begin                // prime the buffers                auCallback(@soundState, auQueue, auBuffers[0]);                auCallback(@soundState, auQueue, auBuffers[1]);                 // enqueue for playing                AudioQueueEnqueueBuffer(auQueue, auBuffers[0], 0, nil);                AudioQueueEnqueueBuffer(auQueue, auBuffers[1], 0, nil);                 // go!                AudioQueueStart(auQueue, nil);            end;        end;    end;     // Our AudioQueue creation options put the CA handling on its own thread    // so this is a quick hack to allow us to hear some sound.      readln;         // be nice even it doesn't really matter at this point     AudioQueueDispose(auQueue, true);end. 
now I can setup the sampling rate and stuff.

it is better now, but I hear the sound only on the right speaker, and the sound is not 100% correct

My new File is 48000 sampling rate


pls hip

trev:
To play *.wav files, see my Wiki article macOS Audio Playter using the Apple AVFoundation framework which is more high level than CoreAudio.

Key-Real:
I don't wanna just Play a .WAV. It is a Test for writing in to the Buffer. I Chose a .WAV to have valid Data.

Navigation

[0] Message Index

[#] Next page

Go to full version