I'm working on source code which processes 2GB of data which represents 60 seconds of network traffic. The total processing time is around 40 seconds. I'm trying to optimize my code for performance as best as possible to try to bring total processing time under the 30 second mark.
My current analysis in dotTrace shows that 7.62% of the time of the 3.3 million calls my code makes is being spent within the constructor of my Timestamp struct.
Specifically, there are two statements which I'm trying to improve:
TimestampHigh = BitConverter.ToUInt32(timestampBytes, 0);
TimestampLow = BitConverter.ToUInt32(timestampBytes, 4);
Here is the full struct:
public readonly struct Timestamp
{
public uint TimestampHigh { get; }
public uint TimestampLow { get; }
public uint Seconds { get; }
public uint Microseconds { get; }
public DateTime LocalTime => new DateTime(EpochTicks + _ticks, DateTimeKind.Utc).ToLocalTime();
private const ulong MicrosecondsPerSecond = 1000000UL;
private const ulong HighFactor = 4294967296UL;
private readonly ulong _timestamp;
private const long EpochTicks = 621355968000000000L;
private const long TicksPerMicrosecond = 10L;
private readonly long _ticks;
public Timestamp(byte[] timestampBytes, bool reverseByteOrder)
{
if (timestampBytes == null)
throw new ArgumentNullException($"{nameof(timestampBytes)} cannot be null.");
if (timestampBytes.Length != 8)
throw new ArgumentException($"{nameof(timestampBytes)} must have a length of 8.");
TimestampHigh = BitConverter.ToUInt32(timestampBytes, 0).ReverseByteOrder(reverseByteOrder);
TimestampLow = BitConverter.ToUInt32(timestampBytes, 4).ReverseByteOrder(reverseByteOrder);
_timestamp = ((ulong)TimestampHigh * HighFactor) + (ulong)TimestampLow;
_ticks = (long)_timestamp * TicksPerMicrosecond;
Seconds = (uint)(_timestamp / MicrosecondsPerSecond);
Microseconds = (uint)(_timestamp % MicrosecondsPerSecond);
}
public Timestamp(uint seconds, uint microseconds)
{
Seconds = seconds;
Microseconds = microseconds;
_timestamp = seconds * MicrosecondsPerSecond + microseconds;
_ticks = (long)_timestamp * TicksPerMicrosecond;
TimestampHigh = (uint)(_timestamp / HighFactor);
TimestampLow = (uint)(_timestamp % HighFactor);
}
public byte[] ConvertToBytes(bool reverseByteOrder)
{
List<byte> bytes = new List<byte>();
bytes.AddRange(BitConverter.GetBytes(TimestampHigh.ReverseByteOrder(reverseByteOrder)));
bytes.AddRange(BitConverter.GetBytes(TimestampLow.ReverseByteOrder(reverseByteOrder)));
return bytes.ToArray();
}
public bool Equals(Timestamp other)
{
return TimestampLow == other.TimestampLow && TimestampHigh == other.TimestampHigh;
}
public static bool operator ==(Timestamp left, Timestamp right)
{
return left.Equals(right);
}
public static bool operator !=(Timestamp left, Timestamp right)
{
return !left.Equals(right);
}
public override bool Equals(object obj)
{
return obj is Timestamp other && Equals(other);
}
public override int GetHashCode()
{
return _timestamp.GetHashCode();
}
}
The method ReverseByteOrder does not seem to incur much of a performance penalty as it represents less than 0.5% of the time according to dotTrace, however here it is for reference:
public static UInt32 ReverseByteOrder(this UInt32 value, bool reverseByteOrder)
{
if (!reverseByteOrder)
{
return value;
}
else
{
byte[] bytes = BitConverter.GetBytes(value);
Array.Reverse(bytes);
return BitConverter.ToUInt32(bytes, 0);
}
}
question from:
https://stackoverflow.com/questions/65912314/improving-performance-converting-bytes-into-uint32 与恶龙缠斗过久,自身亦成为恶龙;凝视深渊过久,深渊将回以凝视…