This article collects typical usage examples of the C# WaveFormat class. If you have been wondering what the WaveFormat class is for and how to use it, the hand-picked examples below may help.
A total of 20 WaveFormat code examples are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# code examples.
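Across the libraries sampled below (cscore, NAudio-style code, Managed DirectX, SlimDX and SharpDX), WaveFormat bundles the same PCM parameters: sample rate, bits per sample, channel count, block alignment and byte rate. As a quick orientation, here is a minimal construction sketch using the three-argument convenience constructor that the cscore-style examples below also use (sample rate, bits, channels); the derived fields follow from PCM arithmetic:

var format = new WaveFormat(44100, 16, 2); // 44.1 kHz, 16-bit, stereo PCM
// Derived fields:
// BlockAlign            = channels * bits / 8       = 4 bytes per sample frame
// AverageBytesPerSecond = sampleRate * BlockAlign   = 176400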
Example 1: CreateMediaObject
protected override MediaObject CreateMediaObject(WaveFormat inputFormat, WaveFormat outputFormat)
{
    _comObj = new DmoMP3DecoderObject();
    var mediaObject = new MediaObject(Marshal.GetComInterfaceForObject(_comObj, typeof(IMediaObject)));
    return mediaObject;
}
Developer: CheViana | Project: AudioLab | Lines: 7 | Source file: DmoMP3Decoder.cs
Example 2: AACEncoder
public AACEncoder(WaveFormat sourceFormat, Stream targetStream, int defaultBitrate, Guid containerType)
{
    if (sourceFormat == null)
        throw new ArgumentNullException("sourceFormat");
    if (targetStream == null)
        throw new ArgumentNullException("targetStream");
    if (!targetStream.CanWrite)
        throw new ArgumentException("Stream is not writeable.");
    if (defaultBitrate <= 0)
        throw new ArgumentOutOfRangeException("defaultBitrate");
    if (containerType == Guid.Empty)
        throw new ArgumentNullException("containerType");

    var targetMediaType = FindBestMediaType(MFMediaTypes.MFAudioFormat_AAC,
        sourceFormat.SampleRate, sourceFormat.Channels, defaultBitrate);
    if (targetMediaType == null)
        throw new NotSupportedException("No AAC encoder was found. Check whether your system supports AAC encoding.");

    var sourceMediaType = MediaFoundationCore.MediaTypeFromWaveFormat(sourceFormat);
    SetTargetStream(targetStream, sourceMediaType, targetMediaType, containerType);
}
Developer: CheViana | Project: AudioLab | Lines: 26 | Source file: AACEncoder.cs
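A minimal usage sketch for this constructor. The bitrate value and the container GUID are illustrative assumptions: Media Foundation defines MFTranscodeContainerType_MPEG4 for MP4/M4A output, but the constant's exact name in your wrapper library may differ.

var sourceFormat = new WaveFormat(44100, 16, 2); // 44.1 kHz stereo 16-bit PCM input
using (var target = File.Create("output.m4a"))
{
    // Assumed constant name for the MP4 transcode container GUID; check your library.
    Guid mp4Container = TranscodeContainerTypes.MFTranscodeContainerType_MPEG4;
    var encoder = new AACEncoder(sourceFormat, target, 192000, mp4Container);
    // ... feed PCM data through the encoder's write API, then dispose it to finalize the file ...
}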
Example 3: StartCapture
public void StartCapture(int sampleRate, Capture captureDevice)
{
    StopCapture();
    EmptyRequest();
    this.sampleRate = sampleRate;
    readPos = 0;
    IsRecording = false;
    record = null;
    recordTime = 0;
    noRecordTime = 0;
    lastSample = null;
    lastSize = 0;
    capture = (captureDevice == null) ? new Capture() : captureDevice;
    WaveFormat waveFormat = new WaveFormat(); // set up the capture format: 16-bit mono PCM
    waveFormat.BitsPerSample = 16;
    waveFormat.BlockAlign = 2;
    waveFormat.Channels = 1;
    waveFormat.AverageBytesPerSecond = sampleRate * 2;
    waveFormat.SamplesPerSecond = sampleRate;
    waveFormat.FormatTag = WaveFormatTag.Pcm;
    CaptureBufferDescription captureBuffDesc = new CaptureBufferDescription();
    captureBuffDesc.BufferBytes = bufferSize;
    captureBuffDesc.Format = waveFormat;
    captureBuffer = new CaptureBuffer(captureBuffDesc, capture);
    captureBuffer.Start(true);
    captureThread = new Thread(captureLoop);
    captureThread.Start();
    new Thread(EmptyRequest).Start();
}
Developer: ClusterM | Project: google-speech-to-text-api.net | Lines: 35 | Source file: SpeechCapture.cs
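The field assignments above encode the standard PCM relationships: BlockAlign is the size of one sample frame in bytes, and AverageBytesPerSecond is the sample rate times the block alignment. A small helper sketch that makes the arithmetic explicit for any bit depth and channel count (illustrative, using the Managed DirectX property names seen above):

static WaveFormat CreatePcmFormat(int sampleRate, short bitsPerSample, short channels)
{
    var wf = new WaveFormat();
    wf.FormatTag = WaveFormatTag.Pcm;
    wf.SamplesPerSecond = sampleRate;
    wf.BitsPerSample = bitsPerSample;
    wf.Channels = channels;
    wf.BlockAlign = (short)(channels * bitsPerSample / 8); // bytes per sample frame
    wf.AverageBytesPerSecond = sampleRate * wf.BlockAlign; // bytes per second
    return wf;
}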
Example 4: MonoToStereoSource
public MonoToStereoSource(IWaveStream source)
    : base(source)
{
    if (source.WaveFormat.Channels != 1)
        throw new ArgumentException("format of source has to be mono (1 channel)", "source");
    _waveFormat = new WaveFormat(source.WaveFormat.SampleRate, 32, 2, AudioEncoding.IeeeFloat);
}
Developer: CheViana | Project: AudioLab | Lines: 7 | Source file: MonoToSteroSource.cs
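Usage is a one-liner; note that the output format is always 32-bit IEEE-float stereo at the source's sample rate, regardless of the source's bit depth (monoSource below stands for any 1-channel IWaveStream):

var stereoSource = new MonoToStereoSource(monoSource);
// stereoSource.WaveFormat: 2 channels, 32-bit IeeeFloat, same sample rate as the source.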
Example 5: XAudio2Renderer
public unsafe XAudio2Renderer()
{
    waveFormat = new WaveFormat();
    waveFormat.FormatTag = WaveFormatTag.Pcm;
    xAudio = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.AnyProcessor);
    masteringVoice = new MasteringVoice(xAudio, 2, 44100);
}
Developer: cros107 | Project: CrystalBoy | Lines: 7 | Source file: XAudio2Renderer.cs
Example 6: WriteWave
public void WriteWave(Stream Stream, Action Writer, WaveFormat WaveFormat)
{
    this.Stream = Stream;
    this.BinaryWriter = new BinaryWriter(Stream);
    WriteChunk("RIFF", () =>
    {
        Stream.Write(Encoding.ASCII.GetBytes("WAVE"), 0, 4);
        WriteChunk("fmt ", () =>
        {
            //Stream.WriteStruct(WaveFormat);
            var BinaryWriter = new BinaryWriter(Stream);
            BinaryWriter.Write(WaveFormat.CompressionCode);
            BinaryWriter.Write(WaveFormat.NumberOfChannels);
            BinaryWriter.Write(WaveFormat.SampleRate);
            BinaryWriter.Write(WaveFormat.BytesPerSecond);
            BinaryWriter.Write(WaveFormat.BlockAlignment);
            BinaryWriter.Write(WaveFormat.BitsPerSample);
            BinaryWriter.Write(WaveFormat.Padding);
        });
        WriteChunk("data", () =>
        {
            Writer();
        });
    });
}
Developer: hermitdave | Project: nvorbis | Lines: 26 | Source file: Wav.cs
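WriteChunk is not shown in the snippet. Under the usual RIFF convention it writes the 4-byte chunk id, a placeholder for the 32-bit little-endian chunk size, runs the body, then seeks back and backfills the actual payload size. A plausible sketch of that helper (an assumption based on the RIFF format, not the project's actual code):

void WriteChunk(string name, Action writeBody)
{
    BinaryWriter.Write(Encoding.ASCII.GetBytes(name)); // 4-byte chunk id
    long sizePosition = Stream.Position;
    BinaryWriter.Write(0);                             // size placeholder
    writeBody();
    long endPosition = Stream.Position;
    Stream.Position = sizePosition;
    BinaryWriter.Write((int)(endPosition - sizePosition - 4)); // backfill payload size
    Stream.Position = endPosition;
}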
Example 7: FromWaveFormat
/// <summary>
/// Creates a MediaType based on a given WaveFormat. Don't forget to call Free() for the returned MediaType.
/// </summary>
/// <param name="waveFormat">WaveFormat to create a MediaType from.</param>
/// <returns>Dmo MediaType</returns>
public static MediaType FromWaveFormat(WaveFormat waveFormat)
{
    if (waveFormat == null)
        throw new ArgumentNullException("waveFormat");
    var mediaType = new MediaType();
    NativeMethods.MoInitMediaType(ref mediaType, Marshal.SizeOf(waveFormat));
    mediaType.MajorType = AudioSubTypes.MediaTypeAudio;
    mediaType.SubType = WaveFormatExtensible.SubTypeFromWaveFormat(waveFormat);
    mediaType.FixedSizeSamples = (mediaType.SubType == AudioSubTypes.IeeeFloat ||
                                  mediaType.SubType == AudioSubTypes.Pcm)
        ? 1
        : 0;
    mediaType.FormatType = FORMAT_WaveFormatEx;
    IntPtr hWaveFormat = Marshal.AllocHGlobal(Marshal.SizeOf(waveFormat));
    Marshal.StructureToPtr(waveFormat, hWaveFormat, false);
    if (hWaveFormat == IntPtr.Zero)
        throw new InvalidOperationException("hWaveFormat == IntPtr.Zero");
    if (mediaType.CbFormat < Marshal.SizeOf(waveFormat))
        throw new InvalidOperationException("No memory for Format reserved");
    mediaType.PtrFormat = hWaveFormat;
    return mediaType;
}
Developer: hoangduit | Project: cscore | Lines: 33 | Source file: MediaType.cs
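Because the method copies the WaveFormat into unmanaged memory (AllocHGlobal), the caller owns that allocation, which is why the doc comment's Free() reminder matters. A short usage sketch:

var mediaType = MediaType.FromWaveFormat(waveFormat);
try
{
    // ... hand mediaType to a DMO, e.g. as an input or output media type ...
}
finally
{
    mediaType.Free(); // releases the unmanaged WAVEFORMATEX the method allocated
}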
Example 8: AcmStream
/// <summary>
/// Creates a new ACM stream to convert one format to another. Note that
/// not all conversions can be done in one step
/// </summary>
/// <param name="sourceFormat">The source audio format</param>
/// <param name="destFormat">The destination audio format</param>
public AcmStream(WaveFormat sourceFormat, WaveFormat destFormat)
{
    try
    {
        streamHandle = IntPtr.Zero;
        this.sourceFormat = sourceFormat;
        int sourceBufferSize = Math.Max(16384, sourceFormat.AverageBytesPerSecond);
        sourceBufferSize -= (sourceBufferSize % sourceFormat.BlockAlign);
        MmException.Try(AcmInterop.acmStreamOpen(out streamHandle, IntPtr.Zero, sourceFormat, destFormat, null, 0, 0, AcmStreamOpenFlags.NonRealTime), "acmStreamOpen");
        // horrible stuff due to weird marshalling issues
        /*
        IntPtr sourceFormatPointer = WaveFormat.MarshalToPtr(sourceFormat);
        IntPtr destFormatPointer = WaveFormat.MarshalToPtr(destFormat);
        MmResult result = AcmInterop.acmStreamOpen2(out streamHandle, IntPtr.Zero, sourceFormatPointer, destFormatPointer, null, 0, 0, AcmStreamOpenFlags.NonRealTime);
        Marshal.FreeHGlobal(sourceFormatPointer);
        Marshal.FreeHGlobal(destFormatPointer);
        MmException.Try(result, "acmStreamOpen");*/
        streamHeader = new AcmStreamHeader(streamHandle, sourceBufferSize, SourceToDest(sourceBufferSize));
        driverHandle = IntPtr.Zero;
    }
    catch
    {
        // suppress finalization and clean up resources
        Dispose();
        throw;
    }
}
Developer: Punloeu | Project: karaoke | Lines: 35 | Source file: AcmStream.cs
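This class originates in NAudio, where conversion proceeds by filling SourceBuffer and calling Convert. A hedged sketch of one conversion pass (inputStream and outputStream stand for any readable/writable streams; the Convert signature is NAudio's as I recall it, so verify against your version):

var source = new WaveFormat(8000, 16, 1);  // 8 kHz mono PCM
var dest = new WaveFormat(44100, 16, 1);   // 44.1 kHz mono PCM
using (var acm = new AcmStream(source, dest))
{
    int bytesRead = inputStream.Read(acm.SourceBuffer, 0, acm.SourceBuffer.Length);
    int sourceBytesConverted;
    int converted = acm.Convert(bytesRead, out sourceBytesConverted);
    outputStream.Write(acm.DestBuffer, 0, converted);
}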
Example 9: Wa2Input
public Wa2Input(Stream file)
    : base(null)
{
    var header = new byte[0x2C];
    if (header.Length != file.Read (header, 0, header.Length))
        throw new EndOfStreamException();
    if (!Binary.AsciiEqual (header, 8, "WAVEfmt "))
        throw new InvalidFormatException();
    var format = new WaveFormat();
    format.FormatTag = LittleEndian.ToUInt16 (header, 0x14);
    format.Channels = LittleEndian.ToUInt16 (header, 0x16);
    format.SamplesPerSecond = LittleEndian.ToUInt32 (header, 0x18);
    format.AverageBytesPerSecond = LittleEndian.ToUInt32 (header, 0x1C);
    format.BlockAlign = LittleEndian.ToUInt16 (header, 0x20);
    format.BitsPerSample = LittleEndian.ToUInt16 (header, 0x22);
    format.ExtraSize = 0;
    this.Format = format;
    uint pcm_size = LittleEndian.ToUInt32 (header, 0x28);
    var pcm = new byte[pcm_size];
    Decode (file, pcm);
    Source = new MemoryStream (pcm);
    this.PcmSize = pcm_size;
    file.Dispose();
}
Developer: Casidi | Project: GARbro | Lines: 26 | Source file: AudioWA2.cs
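The offsets match the canonical RIFF/WAVE layout, so the reads map one-to-one onto WAVEFORMATEX fields (standard WAV layout, independent of this project):

// 0x00 "RIFF" + size        0x08 "WAVE" + "fmt " (checked above as "WAVEfmt ")
// 0x14 wFormatTag      u16  0x16 nChannels       u16
// 0x18 nSamplesPerSec  u32  0x1C nAvgBytesPerSec u32
// 0x20 nBlockAlign     u16  0x22 wBitsPerSample  u16
// 0x28 size of the PCM payload (u32), read into pcm_size above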
Example 10: WmaWriter
/// <summary>
/// Create the writer indicating Metadata information
/// </summary>
/// <param name="output"><see cref="System.IO.Stream"/> where the resulting WMA stream will be written</param>
/// <param name="format">PCM format of input data received in <see cref="WmaWriter.Write"/> method</param>
/// <param name="profile">IWMProfile that describes the resulting compressed stream</param>
/// <param name="metadataAttributes">Array of <see cref="yeti.wma.structs.WM_Attr"/> structures describing the metadata information that will be in the resulting stream</param>
public WmaWriter(Stream output, WaveFormat format, IWMProfile profile, IEnumerable<WM_Attr> metadataAttributes)
    : base(output, format)
{
    m_Writer = WM.CreateWriter();
    var wa = (IWMWriterAdvanced)m_Writer;
    wa.AddSink((IWMWriterSink)this);
    m_Writer.SetProfile(profile);
    uint inputs;
    m_Writer.GetInputCount(out inputs);
    if (inputs == 1)
    {
        IWMInputMediaProps inpProps;
        Guid type;
        m_Writer.GetInputProps(0, out inpProps);
        inpProps.GetType(out type);
        if (type == MediaTypes.WMMEDIATYPE_Audio)
        {
            WM_MEDIA_TYPE mt;
            mt.majortype = MediaTypes.WMMEDIATYPE_Audio;
            mt.subtype = MediaTypes.WMMEDIASUBTYPE_PCM;
            mt.bFixedSizeSamples = true;
            mt.bTemporalCompression = false;
            mt.lSampleSize = (uint)m_InputDataFormat.nBlockAlign;
            mt.formattype = MediaTypes.WMFORMAT_WaveFormatEx;
            mt.pUnk = IntPtr.Zero;
            mt.cbFormat = (uint)Marshal.SizeOf(m_InputDataFormat);
            GCHandle h = GCHandle.Alloc(m_InputDataFormat, GCHandleType.Pinned);
            try
            {
                mt.pbFormat = h.AddrOfPinnedObject();
                inpProps.SetMediaType(ref mt);
            }
            finally
            {
                h.Free();
            }
            m_Writer.SetInputProps(0, inpProps);
            if (metadataAttributes != null)
            {
                var info = new WMHeaderInfo((IWMHeaderInfo)m_Writer);
                foreach (WM_Attr attr in metadataAttributes)
                {
                    info.SetAttribute(attr);
                }
                info = null;
            }
            m_Writer.BeginWriting();
            m_Profile = profile;
        }
        else
        {
            throw new ArgumentException("Invalid profile", "profile");
        }
    }
    else
    {
        throw new ArgumentException("Invalid profile", "profile");
    }
}
Developer: pclancy | Project: yeti | Lines: 67 | Source file: WmaWriter.cs
Example 11: Sound
public Sound(IntPtr handle, DirectSound device)
{
    if (device != null)
    {
        device.SetCooperativeLevel(handle, CooperativeLevel.Priority);
        var format = new WaveFormat
        {
            SamplesPerSecond = 44100,
            BitsPerSample = 16,
            Channels = 2,
            FormatTag = WaveFormatTag.Pcm,
            BlockAlignment = 4
        };
        format.AverageBytesPerSecond = format.SamplesPerSecond * format.Channels * (format.BitsPerSample / 8);
        var desc = new SoundBufferDescription
        {
            Format = format,
            Flags =
                BufferFlags.GlobalFocus | BufferFlags.Software | BufferFlags.GetCurrentPosition2 | BufferFlags.ControlVolume,
            SizeInBytes = BufferSize
        };
        DSoundBuffer = new SecondarySoundBuffer(device, desc);
        ChangeVolume(Global.Config.SoundVolume);
    }
    SoundBuffer = new byte[BufferSize];
    disposed = false;
}
Developer: ddugovic | Project: RASuite | Lines: 30 | Source file: Sound.cs
Example 12: DmoChannelResampler
/// <summary>
/// Initializes a new instance of the <see cref="DmoChannelResampler"/> class.
/// </summary>
/// <param name="source">Underlying source which has to get resampled.</param>
/// <param name="channelMatrix"><see cref="ChannelMatrix" /> which defines how to map each channel.</param>
/// <param name="outputFormat">WaveFormat which specifies the new format. Note that only a limited set of formats is supported.</param>
/// <exception cref="System.ArgumentNullException">
/// source
/// or
/// channelMatrix
/// or
/// outputFormat
/// </exception>
/// <exception cref="System.ArgumentException">The number of channels of the source has to be equal to the number of input channels specified by the channelMatrix.</exception>
public DmoChannelResampler(IWaveSource source, ChannelMatrix channelMatrix, WaveFormat outputFormat)
    : base(source, outputFormat)
{
    if (source == null)
        throw new ArgumentNullException("source");
    if (channelMatrix == null)
        throw new ArgumentNullException("channelMatrix");
    if (outputFormat == null)
        throw new ArgumentNullException("outputFormat");
    if (source.WaveFormat.Channels != channelMatrix.InputChannelCount)
    {
        throw new ArgumentException(
            "The number of channels of the source has to be equal to the number of input channels specified by the channelMatrix.");
    }

    var inputFormat = new WaveFormatExtensible(
        source.WaveFormat.SampleRate,
        source.WaveFormat.BitsPerSample,
        source.WaveFormat.Channels,
        WaveFormatExtensible.SubTypeFromWaveFormat(source.WaveFormat),
        channelMatrix.InputMask);

    Outputformat = new WaveFormatExtensible(
        outputFormat.SampleRate,
        outputFormat.BitsPerSample,
        outputFormat.Channels,
        WaveFormatExtensible.SubTypeFromWaveFormat(outputFormat),
        channelMatrix.OutputMask);

    Initialize(inputFormat, Outputformat);
    _channelMatrix = channelMatrix;
    CommitChannelMatrixChanges();
}
Developer: opcon | Project: cscore | Lines: 48 | Source file: DmoChannelResampler.cs
Example 13: DirectSoundSecondaryBuffer
/// <summary>
/// Initializes a new instance of the <see cref="DirectSoundSecondaryBuffer"/> class.
/// </summary>
/// <param name="directSound">A <see cref="DirectSoundBase"/> instance which provides the <see cref="DirectSoundBase.CreateSoundBuffer"/> method.</param>
/// <param name="waveFormat">The <see cref="WaveFormat"/> of the sound buffer.</param>
/// <param name="bufferSize">The buffer size. Internally, the <see cref="DSBufferDescription.BufferBytes"/> will be set to <paramref name="bufferSize"/> * 2.</param>
/// <exception cref="System.ArgumentNullException"><paramref name="directSound"/> or <paramref name="waveFormat"/></exception>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="bufferSize"/> must be a value between 4 and 0x0FFFFFFF.</exception>
public DirectSoundSecondaryBuffer(DirectSoundBase directSound, WaveFormat waveFormat, int bufferSize)
{
    if (directSound == null)
        throw new ArgumentNullException("directSound");
    if (waveFormat == null)
        throw new ArgumentNullException("waveFormat");
    if (bufferSize < 4 || bufferSize > 0x0FFFFFFF)
        throw new ArgumentOutOfRangeException("bufferSize");

    DSBufferDescription secondaryBufferDesc = new DSBufferDescription()
    {
        BufferBytes = bufferSize,
        Flags = DSBufferCapsFlags.ControlFrequency | DSBufferCapsFlags.ControlPan |
                DSBufferCapsFlags.ControlVolume | DSBufferCapsFlags.ControlPositionNotify |
                DSBufferCapsFlags.GetCurrentPosition2 | DSBufferCapsFlags.GlobalFocus |
                DSBufferCapsFlags.StickyFocus,
        Reserved = 0,
        Guid3DAlgorithm = Guid.Empty
    };
    secondaryBufferDesc.Size = Marshal.SizeOf(secondaryBufferDesc);

    GCHandle hWaveFormat = GCHandle.Alloc(waveFormat, GCHandleType.Pinned);
    try
    {
        secondaryBufferDesc.PtrFormat = hWaveFormat.AddrOfPinnedObject();
        //Create(directSound, secondaryBufferDesc);
        BasePtr = directSound.CreateSoundBuffer(secondaryBufferDesc, IntPtr.Zero);
    }
    finally
    {
        hWaveFormat.Free();
    }
}
Developer: hoangduit | Project: cscore | Lines: 41 | Source file: DirectSoundSecondaryBuffer.cs
Example 14: AddMixerInput
/// <summary>
/// Adds a new mixer input
/// </summary>
/// <param name="mixerInput">Mixer input</param>
public void AddMixerInput(ISampleProvider mixerInput)
{
    // we only lock around the add, since we are protecting against an AddMixerInput
    // racing with a Read, rather than two AddMixerInput calls at the same time
    lock (sources)
    {
        if (this.sources.Count >= maxInputs)
        {
            throw new InvalidOperationException("Too many mixer inputs");
        }
        this.sources.Add(mixerInput);
    }
    if (this.waveFormat == null)
    {
        this.waveFormat = mixerInput.WaveFormat;
    }
    else
    {
        if (this.WaveFormat.SampleRate != mixerInput.WaveFormat.SampleRate ||
            this.WaveFormat.Channels != mixerInput.WaveFormat.Channels)
        {
            throw new ArgumentException("All mixer inputs must have the same WaveFormat");
        }
    }
}
Developer: hanistory | Project: hasuite | Lines: 29 | Source file: MixingSampleProvider.cs
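This method comes from NAudio's MixingSampleProvider. A short usage sketch in that style (assumes two ISampleProvider inputs that already share a sample rate and channel count; CreateIeeeFloatWaveFormat is NAudio's factory for float formats):

var mixer = new MixingSampleProvider(WaveFormat.CreateIeeeFloatWaveFormat(44100, 2));
mixer.AddMixerInput(inputA); // inputA, inputB: ISampleProvider instances
mixer.AddMixerInput(inputB); // throws if the formats don't match
// mixer can now be read from, or handed to an output device.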
Example 15: ini
public void ini()
{
    this.Text = this.Tag.ToString();
    file = this.Text;
    mWavFormat = SetWaveFormat();
    try
    {
        f.Hide();
        FileInfo de = new FileInfo(Path.GetDirectoryName(this.Text) + "\\" + Path.GetFileNameWithoutExtension(this.Text) + "T.wav");
        if (de.Exists)
            de.Delete();
        CreateWaveFile(Path.GetDirectoryName(this.Text) + "\\" + Path.GetFileNameWithoutExtension(this.Text) + "T.wav");
        CreateCaptuerDevice();
        CreateCaptureBuffer();
        CreateNotification();
        if (File.Exists(Path.GetDirectoryName(file) + "\\" + Path.GetFileNameWithoutExtension(file) + ".lrc"))
        {
            lrc = new Lyrics(file);
        }
        wi = new WaveInfo(file);
        progressBar1.Maximum = (int)wi.Second;
    }
    catch (Exception ex)
    {
        MessageBox.Show("Error: " + ex.Message);
        f.Show();
        this.Close();
    }
}
Developer: 764664 | Project: SimpleKaraoke | Lines: 30 | Source file: Form2.cs
Example 16: SuggestFormat
public static WaveFormat SuggestFormat(WaveFormat sourceFormat)
{
    WaveFormat result = new WaveFormat(sourceFormat.SampleRate, 16, sourceFormat.Channels); //todo: 16bits fix
    AcmException.Try(AcmInterop.acmFormatSuggest(IntPtr.Zero, sourceFormat, result,
        Marshal.SizeOf(result), AcmFormatSuggestFlags.FormatTag), "acmFormatSuggest");
    return result;
}
Developer: CheViana | Project: AudioLab | Lines: 7 | Source file: AcmBufferConverter.cs
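For example, to ask the ACM which 16-bit PCM format a compressed source can be decoded to (a sketch; the class name is taken from the source file above, and compressedFormat stands for any non-PCM WaveFormat):

WaveFormat pcmTarget = AcmBufferConverter.SuggestFormat(compressedFormat);
// pcmTarget keeps the source's sample rate and channel count at 16 bits per sample,
// and can be used as the destination format of an ACM conversion stream.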
Example 17: Sound
public Sound(string filename, int ID, short type)
    : base(filename, ID)
{
    // get the file data
    WaveFile wf = FileManager.Instance.Load(filename);
    if (wf.WavFile != null) // we have a wave file with headers
    {
        // set up the buffer properties
        soundDesc = new BufferDescription();
        soundDesc.GlobalFocus = false;
        soundDesc.ControlVolume = true;
        // enable 3D features for 3D sounds
        if (type == Sound.THREED_SOUND)
        {
            soundDesc.Control3D = true;
            soundDesc.Mute3DAtMaximumDistance = true;
        }
        // load the wave file from the stream into the buffer
        sound = new SecondaryBuffer(wf.WavFile, soundDesc, ((DirectSoundManager)SoundManager.Instance).Device);
    }
    else // we have only raw PCM encoded sound data (usually from a decoder)
    {
        // convert the format settings
        WaveFormat wfo = new WaveFormat();
        wfo.BitsPerSample = wf.Bits;
        wfo.Channels = wf.Channels;
        wfo.SamplesPerSecond = wf.Frequency;
        wfo.BlockAlign = (short)(wf.Bits * wf.Channels / 8);
        wfo.FormatTag = WaveFormatTag.Pcm;
        wfo.AverageBytesPerSecond = wf.Frequency * wfo.BlockAlign;
        // set up buffer properties
        soundDesc = new BufferDescription(wfo);
        soundDesc.GlobalFocus = false;
        soundDesc.ControlVolume = true;
        soundDesc.BufferBytes = (int)wf.Data.Length;
        // enable 3D features for 3D sounds
        if (type == Sound.THREED_SOUND)
        {
            soundDesc.Control3D = true;
            soundDesc.Mute3DAtMaximumDistance = true;
        }
        // initialise the buffer and copy the (raw data) stream into it
        sound = new SecondaryBuffer(soundDesc, ((DirectSoundManager)SoundManager.Instance).Device);
        sound.Write(0, wf.Data, (int)wf.Data.Length, LockFlag.EntireBuffer);
    }
    // create a 3D buffer for 3D sounds
    if (type == Sound.THREED_SOUND)
    {
        threeDsound = new Buffer3D(sound);
        threeDsound.Mode = Mode3D.Normal;
        threeDsound.Deferred = true;
    }
}
Developer: BackupTheBerlios | Project: agex-svn | Lines: 60 | Source file: Sound.cs
Example 18: SoundEffect
/// <summary>
/// Initializes a new instance of the <see cref="SoundEffect"/> class.
/// </summary>
/// <param name="audioManager">The associated audio manager instance.</param>
/// <param name="name">The name of the current instance.</param>
/// <param name="waveFormat">The format of the current instance.</param>
/// <param name="buffer">The buffer containing audio data.</param>
/// <param name="decodedPacketsInfo">The information regarding decoded packets.</param>
internal SoundEffect(AudioManager audioManager, string name, WaveFormat waveFormat, DataStream buffer, uint[] decodedPacketsInfo)
{
    AudioManager = audioManager;
    Name = name;
    Format = waveFormat;
    AudioBuffer = new AudioBuffer
    {
        Stream = buffer,
        AudioBytes = (int)buffer.Length,
        Flags = BufferFlags.EndOfStream,
    };
    LoopedAudioBuffer = new AudioBuffer
    {
        Stream = buffer,
        AudioBytes = (int)buffer.Length,
        Flags = BufferFlags.EndOfStream,
        LoopCount = AudioBuffer.LoopInfinite,
    };
    DecodedPacketsInfo = decodedPacketsInfo;
    Duration = Format.SampleRate > 0 ? TimeSpan.FromMilliseconds(GetSamplesDuration() * 1000 / Format.SampleRate) : TimeSpan.Zero;
    children = new List<WeakReference>();
    VoicePool = AudioManager.InstancePool.GetVoicePool(Format);
}
Developer: EvanMachusak | Project: SharpDX | Lines: 34 | Source file: SoundEffect.cs
Example 19: InitDirectSound
void InitDirectSound(IntPtr handle)
{
    // Create the device
    _SoundDevice = new DirectSound();
    _SoundDevice.SetCooperativeLevel(handle, CooperativeLevel.Priority);
    // Create the wave format: mono, 44100 Hz, 16-bit PCM
    // TODO: support more wave formats
    WaveFormat wav = new WaveFormat();
    wav.FormatTag = WaveFormatTag.Pcm;
    wav.SamplesPerSecond = 44100;
    wav.Channels = 1; // mono
    wav.BitsPerSample = 16;
    wav.AverageBytesPerSecond = 88200; // wav.SamplesPerSecond * wav.Channels * (wav.BitsPerSample / 8)
    wav.BlockAlignment = 2; // wav.Channels * wav.BitsPerSample / 8
    BufferSize = 88200 * 5; // five seconds of audio
    // Description
    SoundBufferDescription des = new SoundBufferDescription();
    des.Format = wav;
    des.SizeInBytes = BufferSize;
    des.Flags = BufferFlags.GlobalFocus | BufferFlags.Software;
    // Buffer
    buffer = new SecondarySoundBuffer(_SoundDevice, des);
    DATA = new byte[BufferSize];
    buffer.Play(0, PlayFlags.Looping);
    // Channels
    InitChannels();
}
Developer: PhilipBrockmeyer | Project: Wren | Lines: 27 | Source file: AudioProcessingUnit.cs
Example 20: AcmFileWriter
/// <summary>
/// Creates a new instance of <see cref="AcmFileWriter"/>.
/// </summary>
/// <param name="FileName">Path to the file to write.</param>
/// <param name="Encoding"><see cref="WaveFormatTag"/> for written audio.</param>
/// <param name="Format"><see cref="WaveFormat"/> of input audio.</param>
public AcmFileWriter(string FileName, WaveFormatTag Encoding, WaveFormat Format)
{
    if (FileName == null)
        throw new ArgumentNullException(nameof(FileName));

    _channel = GetDummyChannel(Format);

    // Get the length of the ACM format structure
    var suggestedFormatLength = BassEnc.GetACMFormat(0);
    var acmFormat = Marshal.AllocHGlobal(suggestedFormatLength);

    try
    {
        // Retrieve the ACM format and start encoding
        if (BassEnc.GetACMFormat(_channel,
                acmFormat,
                suggestedFormatLength,
                null,
                // If encoding is Unknown, let the user choose an encoding.
                Encoding == WaveFormatTag.Unknown ? 0 : ACMFormatFlags.Suggest,
                Encoding) != 0)
            _handle = BassEnc.EncodeStartACM(_channel, acmFormat, 0, FileName);
        else throw new Exception(Bass.LastError.ToString());
    }
    finally
    {
        // Free the ACM format structure
        Marshal.FreeHGlobal(acmFormat);
    }
}
Developer: ManagedBass | Project: ManagedBass | Lines: 36 | Source file: AcmFileWriter.cs
Note: The WaveFormat class examples in this article were compiled from GitHub, MSDocs and similar source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's license before redistributing or reusing the code; do not reproduce without permission.