Tim Bolstad(http://timbolstad.com/2010/03/16/core-audio-getting-started-pt2/ — 정말 고마운 분입니다)가 제공한 코드를 수정해서, 출력 톤의 주파수를 40Hz에서 200000Hz까지 바꿀 수 있는 작은 슬라이더를 추가했습니다. 이제 생성된 톤에 LPF(저역 통과 필터)를 적용하고 싶습니다. 즉, 두 오디오 유닛 사이에 AULowPass 필터를 추가하려는 것입니다.
우선, 이 작업 방법을 자세히 설명한 가이드를 아는 분이 있을까요? 그냥 두 유닛 사이에 노드를 추가해 보았지만 동작하지 않습니다. 필터에 오디오 샘플을 입력하기 전에 16비트 정수 샘플을 8.24 고정소수점 형식으로 변환하고, 필터를 거친 뒤 다시 16비트 정수로 되돌려야 하는 것일까요? 그게 문제일까요, 아니면 노드를 잘못 연결한 것일까요? 그리고 필터의 차단 주파수(cutoff frequency)와 기타 매개변수는 어디에서 설정해야 하나요?
그리고 AudioUnitGetProperty가 어떤 역할을 하는지 설명해 주실 분 있나요? 이 부분에 대한 애플 문서는 내용이 지나치게 단편적이어서 이해하기 어렵습니다.
// Builds and initializes the AUGraph:
//   render callback -> multichannel mixer -> AULowPass filter -> RemoteIO output.
//
// Fixes vs. the original:
//  * The AudioUnitGetProperty(StreamFormat) calls were immediately followed by
//    memset(&desc, 0, ...), which wiped the value just fetched — the "get" was
//    dead code. The ASBD is now zeroed FIRST and every field filled explicitly.
//  * The mixer output format was set twice (first to 16-bit int, then to 8.24
//    canonical). Only the canonical set is kept: AULowPass, like most Apple
//    effect units on iOS, expects the canonical 8.24 fixed-point format, and
//    the mixer performs the 16-bit -> 8.24 conversion for us.
//  * The same canonical format is applied to the filter's input and output so
//    the mixer -> filter -> RemoteIO connections negotiate cleanly
//    (NOTE(review): this is the usual fix for "effect node doesn't work";
//    confirm with CAShow(mGraph) that all connections carry the 8.24 format).
//  * OSStatus results are now checked so graph failures are visible instead of
//    being silently discarded.
-(void) initializeAUGraph
{
    OSStatus result = noErr;

    result = NewAUGraph(&mGraph);
    if (result != noErr) { printf("NewAUGraph failed: %d\n", (int)result); return; }

    AUNode outputNode;
    AUNode mixerNode;
    AUNode effectsNode;

    // AULowPass effect unit.
    AudioComponentDescription effects_desc;
    effects_desc.componentType         = kAudioUnitType_Effect;
    effects_desc.componentSubType      = kAudioUnitSubType_LowPassFilter;
    effects_desc.componentFlags        = 0;
    effects_desc.componentFlagsMask    = 0;
    effects_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Multichannel mixer unit.
    AudioComponentDescription mixer_desc;
    mixer_desc.componentType         = kAudioUnitType_Mixer;
    mixer_desc.componentSubType      = kAudioUnitSubType_MultiChannelMixer;
    mixer_desc.componentFlags        = 0;
    mixer_desc.componentFlagsMask    = 0;
    mixer_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // RemoteIO output unit (hardware output on iOS).
    AudioComponentDescription output_desc;
    output_desc.componentType         = kAudioUnitType_Output;
    output_desc.componentSubType      = kAudioUnitSubType_RemoteIO;
    output_desc.componentFlags        = 0;
    output_desc.componentFlagsMask    = 0;
    output_desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Add the three nodes and wire them: mixer -> low-pass -> output.
    result = AUGraphAddNode(mGraph, &output_desc, &outputNode);
    if (result != noErr) printf("AUGraphAddNode(output) failed: %d\n", (int)result);
    result = AUGraphAddNode(mGraph, &mixer_desc, &mixerNode);
    if (result != noErr) printf("AUGraphAddNode(mixer) failed: %d\n", (int)result);
    result = AUGraphAddNode(mGraph, &effects_desc, &effectsNode);
    if (result != noErr) printf("AUGraphAddNode(effects) failed: %d\n", (int)result);

    result = AUGraphConnectNodeInput(mGraph, mixerNode, 0, effectsNode, 0);
    if (result != noErr) printf("connect mixer->effects failed: %d\n", (int)result);
    result = AUGraphConnectNodeInput(mGraph, effectsNode, 0, outputNode, 0);
    if (result != noErr) printf("connect effects->output failed: %d\n", (int)result);

    // Open the graph; the units are instantiated here (but not initialized yet),
    // so this must happen before AUGraphNodeInfo / AudioUnitSetProperty.
    result = AUGraphOpen(mGraph);
    if (result != noErr) { printf("AUGraphOpen failed: %d\n", (int)result); return; }

    // Fetch the AudioUnit instances backing the nodes so we can configure them.
    result = AUGraphNodeInfo(mGraph, mixerNode, NULL, &mMixer);
    if (result != noErr) printf("AUGraphNodeInfo(mixer) failed: %d\n", (int)result);
    result = AUGraphNodeInfo(mGraph, effectsNode, NULL, &mEffects);
    if (result != noErr) printf("AUGraphNodeInfo(effects) failed: %d\n", (int)result);

    // We feed the mixer from a single render-callback bus.
    UInt32 numbuses = 1;
    UInt32 size = sizeof(numbuses);
    result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_ElementCount,
                                  kAudioUnitScope_Input, 0, &numbuses, size);
    if (result != noErr) printf("set ElementCount failed: %d\n", (int)result);

    CAStreamBasicDescription desc;

    // Set up a render callback and a stream format for each mixer input bus.
    // With numbuses == 1 the loop runs once, but it generalizes.
    for (UInt32 i = 0; i < numbuses; ++i)
    {
        // This struct names the function that supplies audio buffers to the
        // mixer input, and the context pointer passed back to it.
        AURenderCallbackStruct renderCallbackStruct;
        renderCallbackStruct.inputProc       = &renderInput;
        renderCallbackStruct.inputProcRefCon = self;
        result = AUGraphSetNodeInputCallback(mGraph, mixerNode, i, &renderCallbackStruct);
        if (result != noErr) printf("set input callback failed: %d\n", (int)result);

        // Describe the callback's output: 16-bit signed-integer mono PCM.
        // Zero the ASBD BEFORE filling it in (the original zeroed it after a
        // GetProperty call, discarding the value it had just fetched).
        memset(&desc, 0, sizeof(desc));
        desc.mSampleRate       = kGraphSampleRate;
        desc.mFormatID         = kAudioFormatLinearPCM;
        desc.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        desc.mBitsPerChannel   = sizeof(AudioSampleType) * 8; // AudioSampleType == 16-bit signed int
        desc.mChannelsPerFrame = 1;
        desc.mFramesPerPacket  = 1;
        desc.mBytesPerFrame    = (desc.mBitsPerChannel / 8) * desc.mChannelsPerFrame;
        desc.mBytesPerPacket   = desc.mBytesPerFrame * desc.mFramesPerPacket;
        printf("Mixer file format: "); desc.Print();

        // Apply the format to this mixer input bus; the mixer accepts 16-bit
        // ints and converts to the canonical format internally.
        result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input, i, &desc, sizeof(desc));
        if (result != noErr) printf("set mixer input format failed: %d\n", (int)result);
    }

    // The mixer output feeds the AULowPass filter, which requires the
    // AUCanonical format — on iOS the 8.24 fixed-point format. The mixer does
    // the 16-bit -> 8.24 shifting for us, so no manual conversion is needed.
    memset(&desc, 0, sizeof(desc));
    desc.SetAUCanonical(1, true);
    desc.mSampleRate = kGraphSampleRate;
    result = AudioUnitSetProperty(mMixer, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output, 0, &desc, sizeof(desc));
    if (result != noErr) printf("set mixer output format failed: %d\n", (int)result);

    // Give the filter the same canonical format on both sides so the
    // mixer -> filter and filter -> RemoteIO connections agree.
    result = AudioUnitSetProperty(mEffects, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input, 0, &desc, sizeof(desc));
    if (result != noErr) printf("set effects input format failed: %d\n", (int)result);
    result = AudioUnitSetProperty(mEffects, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output, 0, &desc, sizeof(desc));
    if (result != noErr) printf("set effects output format failed: %d\n", (int)result);

    // Validate all connections and formats; failures surface here.
    result = AUGraphInitialize(mGraph);
    if (result != noErr) printf("AUGraphInitialize failed: %d\n", (int)result);
}
CAShow(mGraph)의 출력 내용을 질문에 올려 주시면, 사람들이 그래프 연결 상태를 보고 도와드리기 훨씬 쉬울 것입니다. – lppier