After testing about 10 billion times, imm64 came out only about 0.1 nanoseconds faster than m64 on AMD64 — if anything, the m64 form seems to be faster, which I don't really understand. Isn't the address of val_ptr in the following code an immediate value itself?
# Text section (macOS Mach-O segment,section syntax)
.section __TEXT,__text,regular,pure_instructions
# Assemble as 64-bit code
.code64
# Use Intel operand order (dest, src) without register prefixes
.intel_syntax noprefix
# Minimum deployment target: macOS High Sierra (10.13.0)
.macosx_version_min 10,13,0
# Export both test functions so the C measurement harness can call them
.globl _test1
.globl _test2
# Test 1: load the constant as a 64-bit immediate (imm64 operand form)
_test1:
# movabs encodes the full 64-bit immediate directly in the instruction bytes
movabs rax, 0xDEADBEEFFEEDFACE
ret
# Test 2: load the same constant from memory (m64 operand form)
_test2:
# RIP-relative load: the displacement to val_ptr is encoded, the 64-bit
# value itself comes from the data section at run time
mov rax, qword ptr [rip + val_ptr]
ret
# Data section holding the value read by _test2
.section __DATA,__data
val_ptr:
.quad 0xDEADBEEFFEEDFACE
The measurement code is:
#include <stdio.h> // For printf
#include <stdlib.h> // For EXIT_SUCCESS
#include <math.h> // For fabs
#include <stdint.h> // For uint64_t
#include <stddef.h> // For size_t
#include <string.h> // For memset
#include <mach/mach_time.h> // For time stuff
#define FUNCTION_COUNT 2 // Number of functions to benchmark
#define TEST_COUNT 0x10000000 // Iterations per function (2^28, ~268 million)
// Type aliases
typedef uint64_t rettype_t; // Return type shared by all assembly test functions
typedef rettype_t(*function_t)(); // Pointer to a test function; () leaves parameters unspecified
// External test functions (defined in Assembly)
rettype_t test1(); // Returns 0xDEADBEEFFEEDFACE loaded as a 64-bit immediate (movabs)
rettype_t test2(); // Returns 0xDEADBEEFFEEDFACE loaded from memory (RIP-relative mov)
// Program entry point
// Program entry point: times each assembly test function TEST_COUNT times
// and prints the per-call average in nanoseconds.
// Returns EXIT_SUCCESS.
int main() {
    // Timebase ratio for converting mach_absolute_time() ticks to nanoseconds
    mach_timebase_info_data_t info;
    mach_timebase_info(&info);
    // Accumulated time per function; divided by TEST_COUNT for the average
    double sums[FUNCTION_COUNT] = {0};
    // Functions to test
    function_t functions[FUNCTION_COUNT] = {test1, test2};
    // Useless results (should be 0xDEADBEEFFEEDFACE), but good to have:
    // storing the return value keeps the calls from being optimized away
    rettype_t results[FUNCTION_COUNT];
    // Function loop, may get unrolled based on optimization level
    for (size_t test_fn = 0; test_fn < FUNCTION_COUNT; test_fn++) {
        // Test this MANY times
        for (size_t test_num = 0; test_num < TEST_COUNT; test_num++) {
            // Tick count before the action. Kept as uint64_t: converting
            // the raw counter to double loses precision once the counter
            // exceeds 2^53, so only the small delta is converted below.
            uint64_t start = mach_absolute_time();
            // Do the action
            results[test_fn] = functions[test_fn]();
            // Elapsed ticks for this call
            uint64_t elapsed = mach_absolute_time() - start;
            // Convert ticks to nanoseconds via the timebase ratio
            double nanoseconds = (double)elapsed * info.numer / info.denom;
            // Add the nanosecond count to the sum
            sums[test_fn] += nanoseconds;
        }
    }
    // Compute the average
    for (size_t i = 0; i < FUNCTION_COUNT; i++) {
        sums[i] /= TEST_COUNT;
    }
    if (FUNCTION_COUNT == 2) {
        // Print some fancy information
        printf("Test 1 took %f nanoseconds average.\n", sums[0]);
        printf("Test 2 took %f nanoseconds average.\n", sums[1]);
        printf("Test %d was faster, with %f nanoseconds difference\n",
               sums[0] < sums[1] ? 1 : 2, fabs(sums[0] - sums[1]));
    } else {
        // Else, just print something
        for (size_t fn_i = 0; fn_i < FUNCTION_COUNT; fn_i++) {
            printf("Test %zu took %f clock ticks average.\n", fn_i + 1, sums[fn_i]);
        }
    }
    // Everything went fine!
    return EXIT_SUCCESS;
}
So, which really is faster: m64 or imm64?
By the way, I'm using Intel Core i7 Ivy Bridge and DDR3 RAM. I'm running macOS High Sierra.
EDIT: I inserted the ret
instructions, and now imm64
turned out to be faster.
See the Questions & Answers section for more detail.