I've done this exact task before. But it was mainly to measure power consumption and CPU temperatures. The following code (which is fairly long) achieves close to optimal on my Core i7 2600K.

The key thing to note here is the massive amount of manual loop-unrolling as well as interleaving of multiplies and adds...

The full project can be found on my GitHub: https://github.com/Mysticial/Flops

# Warning:

**If you decide to compile and run this, pay attention to your CPU temperatures!!!**
Make sure you don't overheat it. And make sure CPU throttling doesn't affect your results!

Furthermore, I take no responsibility for whatever damage may result from running this code.

**Notes:**

- This code is optimized for x64. x86 doesn't have enough registers for this to compile well.
- This code has been tested to work well on Visual Studio 2010/2012 and GCC 4.6. ICC 11 (Intel Compiler 11) surprisingly has trouble compiling it well.
- These are for pre-FMA processors. In order to achieve peak FLOPS on Intel Haswell and AMD Bulldozer processors (and later), FMA (Fused Multiply Add) instructions will be needed. These are beyond the scope of this benchmark, but see the sketch at the end of this answer.

```cpp
#include <emmintrin.h>
#include <omp.h>
#include <iostream>
#include <cstdlib>   //  malloc/free/system (added for standalone compilation)
using namespace std;

typedef unsigned long long uint64;

double test_dp_mac_SSE(double x,double y,uint64 iterations){
    register __m128d r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,rA,rB,rC,rD,rE,rF;

    //  Generate starting data.
    r0 = _mm_set1_pd(x);
    r1 = _mm_set1_pd(y);

    r8 = _mm_set1_pd(-0.0);

    r2 = _mm_xor_pd(r0,r8);
    r3 = _mm_or_pd(r0,r8);
    r4 = _mm_andnot_pd(r8,r0);
    r5 = _mm_mul_pd(r1,_mm_set1_pd(0.37796447300922722721));
    r6 = _mm_mul_pd(r1,_mm_set1_pd(0.24253562503633297352));
    r7 = _mm_mul_pd(r1,_mm_set1_pd(4.1231056256176605498));
    r8 = _mm_add_pd(r0,_mm_set1_pd(0.37796447300922722721));
    r9 = _mm_add_pd(r1,_mm_set1_pd(0.24253562503633297352));
    rA = _mm_sub_pd(r0,_mm_set1_pd(4.1231056256176605498));
    rB = _mm_sub_pd(r1,_mm_set1_pd(4.1231056256176605498));
    rC = _mm_set1_pd(1.4142135623730950488);
    rD = _mm_set1_pd(1.7320508075688772935);
    rE = _mm_set1_pd(0.57735026918962576451);
    rF = _mm_set1_pd(0.70710678118654752440);

    uint64 iMASK = 0x800fffffffffffffull;
    __m128d MASK = _mm_set1_pd(*(double*)&iMASK);
    __m128d vONE = _mm_set1_pd(1.0);

    uint64 c = 0;
    while (c < iterations){
        size_t i = 0;
        while (i < 1000){
            //  Here's the meat - the part that really matters.

            r0 = _mm_mul_pd(r0,rC);
            r1 = _mm_add_pd(r1,rD);
            r2 = _mm_mul_pd(r2,rE);
            r3 = _mm_sub_pd(r3,rF);
            r4 = _mm_mul_pd(r4,rC);
            r5 = _mm_add_pd(r5,rD);
            r6 = _mm_mul_pd(r6,rE);
            r7 = _mm_sub_pd(r7,rF);
            r8 = _mm_mul_pd(r8,rC);
            r9 = _mm_add_pd(r9,rD);
            rA = _mm_mul_pd(rA,rE);
            rB = _mm_sub_pd(rB,rF);

            r0 = _mm_add_pd(r0,rF);
            r1 = _mm_mul_pd(r1,rE);
            r2 = _mm_sub_pd(r2,rD);
            r3 = _mm_mul_pd(r3,rC);
            r4 = _mm_add_pd(r4,rF);
            r5 = _mm_mul_pd(r5,rE);
            r6 = _mm_sub_pd(r6,rD);
            r7 = _mm_mul_pd(r7,rC);
            r8 = _mm_add_pd(r8,rF);
            r9 = _mm_mul_pd(r9,rE);
            rA = _mm_sub_pd(rA,rD);
            rB = _mm_mul_pd(rB,rC);

            r0 = _mm_mul_pd(r0,rC);
            r1 = _mm_add_pd(r1,rD);
            r2 = _mm_mul_pd(r2,rE);
            r3 = _mm_sub_pd(r3,rF);
            r4 = _mm_mul_pd(r4,rC);
            r5 = _mm_add_pd(r5,rD);
            r6 = _mm_mul_pd(r6,rE);
            r7 = _mm_sub_pd(r7,rF);
            r8 = _mm_mul_pd(r8,rC);
            r9 = _mm_add_pd(r9,rD);
            rA = _mm_mul_pd(rA,rE);
            rB = _mm_sub_pd(rB,rF);

            r0 = _mm_add_pd(r0,rF);
            r1 = _mm_mul_pd(r1,rE);
            r2 = _mm_sub_pd(r2,rD);
            r3 = _mm_mul_pd(r3,rC);
            r4 = _mm_add_pd(r4,rF);
            r5 = _mm_mul_pd(r5,rE);
            r6 = _mm_sub_pd(r6,rD);
            r7 = _mm_mul_pd(r7,rC);
            r8 = _mm_add_pd(r8,rF);
            r9 = _mm_mul_pd(r9,rE);
            rA = _mm_sub_pd(rA,rD);
            rB = _mm_mul_pd(rB,rC);

            i++;
        }

        //  Need to renormalize to prevent denormal/overflow.
        r0 = _mm_and_pd(r0,MASK);
        r1 = _mm_and_pd(r1,MASK);
        r2 = _mm_and_pd(r2,MASK);
        r3 = _mm_and_pd(r3,MASK);
        r4 = _mm_and_pd(r4,MASK);
        r5 = _mm_and_pd(r5,MASK);
        r6 = _mm_and_pd(r6,MASK);
        r7 = _mm_and_pd(r7,MASK);
        r8 = _mm_and_pd(r8,MASK);
        r9 = _mm_and_pd(r9,MASK);
        rA = _mm_and_pd(rA,MASK);
        rB = _mm_and_pd(rB,MASK);
        r0 = _mm_or_pd(r0,vONE);
        r1 = _mm_or_pd(r1,vONE);
        r2 = _mm_or_pd(r2,vONE);
        r3 = _mm_or_pd(r3,vONE);
        r4 = _mm_or_pd(r4,vONE);
        r5 = _mm_or_pd(r5,vONE);
        r6 = _mm_or_pd(r6,vONE);
        r7 = _mm_or_pd(r7,vONE);
        r8 = _mm_or_pd(r8,vONE);
        r9 = _mm_or_pd(r9,vONE);
        rA = _mm_or_pd(rA,vONE);
        rB = _mm_or_pd(rB,vONE);

        c++;
    }

    r0 = _mm_add_pd(r0,r1);
    r2 = _mm_add_pd(r2,r3);
    r4 = _mm_add_pd(r4,r5);
    r6 = _mm_add_pd(r6,r7);
    r8 = _mm_add_pd(r8,r9);
    rA = _mm_add_pd(rA,rB);

    r0 = _mm_add_pd(r0,r2);
    r4 = _mm_add_pd(r4,r6);
    r8 = _mm_add_pd(r8,rA);

    r0 = _mm_add_pd(r0,r4);
    r0 = _mm_add_pd(r0,r8);

    //  Prevent Dead Code Elimination
    double out = 0;
    __m128d temp = r0;
    out += ((double*)&temp)[0];
    out += ((double*)&temp)[1];

    return out;
}

void test_dp_mac_SSE(int tds,uint64 iterations){
    double *sum = (double*)malloc(tds * sizeof(double));
    double start = omp_get_wtime();

#pragma omp parallel num_threads(tds)
    {
        double ret = test_dp_mac_SSE(1.1,2.1,iterations);
        sum[omp_get_thread_num()] = ret;
    }

    double secs = omp_get_wtime() - start;
    uint64 ops = 48 * 1000 * iterations * tds * 2;
    cout << "Seconds = " << secs << endl;
    cout << "FP Ops  = " << ops << endl;
    cout << "FLOPs   = " << ops / secs << endl;

    double out = 0;
    int c = 0;
    while (c < tds){
        out += sum[c++];
    }

    cout << "sum = " << out << endl;
    cout << endl;

    free(sum);
}

int main(){
    //  (threads, iterations)
    test_dp_mac_SSE(8,10000000);
    system("pause");
}
```
**Output (1 thread, 10000000 iterations) - Compiled with Visual Studio 2010 SP1 - x64 Release:**

```
Seconds = 55.5104
FP Ops  = 960000000000
FLOPs   = 1.7294e+010
sum = 2.22652
```

The machine is a Core i7 2600K @ 4.4 GHz. Theoretical SSE peak is 4 flops * 4.4 GHz = **17.6 GFlops**. This code achieves **17.3 GFlops** - not bad.
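Where do these numbers come from? Each pass through the inner loop executes 48 SSE instructions, each operating on 2 doubles. A standalone sanity check (my sketch, not part of the benchmark) that ties the printed figures together:

```cpp
//  Standalone sanity check of the FLOP accounting (not part of the benchmark).
#include <iostream>

int main(){
    //  48 instructions per inner iteration * 1000 inner iterations
    //  * 10000000 outer iterations * 1 thread * 2 doubles per __m128d
    unsigned long long ops = 48ull * 1000 * 10000000 * 1 * 2;
    std::cout << "FP Ops = " << ops << std::endl;            //  960000000000
    std::cout << "FLOPs  = " << ops / 55.5104 << std::endl;  //  ~1.7294e+010
}
```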
**Output (8 threads, 10000000 iterations) - Compiled with Visual Studio 2010 SP1 - x64 Release:**

```
Seconds = 117.202
FP Ops  = 7680000000000
FLOPs   = 6.55279e+010
sum = 17.8122
```

Theoretical SSE peak is 4 flops * 4 cores * 4.4 GHz = **70.4 GFlops**. Actual is **65.5 GFlops**.

---

## Let's take this one step further. AVX...

```cpp
#include <immintrin.h>
#include <omp.h>
#include <iostream>
#include <cstdlib>   //  malloc/free/system (added for standalone compilation)
using namespace std;

typedef unsigned long long uint64;

double test_dp_mac_AVX(double x,double y,uint64 iterations){
    register __m256d r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,rA,rB,rC,rD,rE,rF;

    //  Generate starting data.
    r0 = _mm256_set1_pd(x);
    r1 = _mm256_set1_pd(y);

    r8 = _mm256_set1_pd(-0.0);

    r2 = _mm256_xor_pd(r0,r8);
    r3 = _mm256_or_pd(r0,r8);
    r4 = _mm256_andnot_pd(r8,r0);
    r5 = _mm256_mul_pd(r1,_mm256_set1_pd(0.37796447300922722721));
    r6 = _mm256_mul_pd(r1,_mm256_set1_pd(0.24253562503633297352));
    r7 = _mm256_mul_pd(r1,_mm256_set1_pd(4.1231056256176605498));
    r8 = _mm256_add_pd(r0,_mm256_set1_pd(0.37796447300922722721));
    r9 = _mm256_add_pd(r1,_mm256_set1_pd(0.24253562503633297352));
    rA = _mm256_sub_pd(r0,_mm256_set1_pd(4.1231056256176605498));
    rB = _mm256_sub_pd(r1,_mm256_set1_pd(4.1231056256176605498));
    rC = _mm256_set1_pd(1.4142135623730950488);
    rD = _mm256_set1_pd(1.7320508075688772935);
    rE = _mm256_set1_pd(0.57735026918962576451);
    rF = _mm256_set1_pd(0.70710678118654752440);

    uint64 iMASK = 0x800fffffffffffffull;
    __m256d MASK = _mm256_set1_pd(*(double*)&iMASK);
    __m256d vONE = _mm256_set1_pd(1.0);

    uint64 c = 0;
    while (c < iterations){
        size_t i = 0;
        while (i < 1000){
            //  Here's the meat - the part that really matters.

            r0 = _mm256_mul_pd(r0,rC);
            r1 = _mm256_add_pd(r1,rD);
            r2 = _mm256_mul_pd(r2,rE);
            r3 = _mm256_sub_pd(r3,rF);
            r4 = _mm256_mul_pd(r4,rC);
            r5 = _mm256_add_pd(r5,rD);
            r6 = _mm256_mul_pd(r6,rE);
            r7 = _mm256_sub_pd(r7,rF);
            r8 = _mm256_mul_pd(r8,rC);
            r9 = _mm256_add_pd(r9,rD);
            rA = _mm256_mul_pd(rA,rE);
            rB = _mm256_sub_pd(rB,rF);

            r0 = _mm256_add_pd(r0,rF);
            r1 = _mm256_mul_pd(r1,rE);
            r2 = _mm256_sub_pd(r2,rD);
            r3 = _mm256_mul_pd(r3,rC);
            r4 = _mm256_add_pd(r4,rF);
            r5 = _mm256_mul_pd(r5,rE);
            r6 = _mm256_sub_pd(r6,rD);
            r7 = _mm256_mul_pd(r7,rC);
            r8 = _mm256_add_pd(r8,rF);
            r9 = _mm256_mul_pd(r9,rE);
            rA = _mm256_sub_pd(rA,rD);
            rB = _mm256_mul_pd(rB,rC);

            r0 = _mm256_mul_pd(r0,rC);
            r1 = _mm256_add_pd(r1,rD);
            r2 = _mm256_mul_pd(r2,rE);
            r3 = _mm256_sub_pd(r3,rF);
            r4 = _mm256_mul_pd(r4,rC);
            r5 = _mm256_add_pd(r5,rD);
            r6 = _mm256_mul_pd(r6,rE);
            r7 = _mm256_sub_pd(r7,rF);
            r8 = _mm256_mul_pd(r8,rC);
            r9 = _mm256_add_pd(r9,rD);
            rA = _mm256_mul_pd(rA,rE);
            rB = _mm256_sub_pd(rB,rF);

            r0 = _mm256_add_pd(r0,rF);
            r1 = _mm256_mul_pd(r1,rE);
            r2 = _mm256_sub_pd(r2,rD);
            r3 = _mm256_mul_pd(r3,rC);
            r4 = _mm256_add_pd(r4,rF);
            r5 = _mm256_mul_pd(r5,rE);
            r6 = _mm256_sub_pd(r6,rD);
            r7 = _mm256_mul_pd(r7,rC);
            r8 = _mm256_add_pd(r8,rF);
            r9 = _mm256_mul_pd(r9,rE);
            rA = _mm256_sub_pd(rA,rD);
            rB = _mm256_mul_pd(rB,rC);

            i++;
        }

        //  Need to renormalize to prevent denormal/overflow.
        r0 = _mm256_and_pd(r0,MASK);
        r1 = _mm256_and_pd(r1,MASK);
        r2 = _mm256_and_pd(r2,MASK);
        r3 = _mm256_and_pd(r3,MASK);
        r4 = _mm256_and_pd(r4,MASK);
        r5 = _mm256_and_pd(r5,MASK);
        r6 = _mm256_and_pd(r6,MASK);
        r7 = _mm256_and_pd(r7,MASK);
        r8 = _mm256_and_pd(r8,MASK);
        r9 = _mm256_and_pd(r9,MASK);
        rA = _mm256_and_pd(rA,MASK);
        rB = _mm256_and_pd(rB,MASK);
        r0 = _mm256_or_pd(r0,vONE);
        r1 = _mm256_or_pd(r1,vONE);
        r2 = _mm256_or_pd(r2,vONE);
        r3 = _mm256_or_pd(r3,vONE);
        r4 = _mm256_or_pd(r4,vONE);
        r5 = _mm256_or_pd(r5,vONE);
        r6 = _mm256_or_pd(r6,vONE);
        r7 = _mm256_or_pd(r7,vONE);
        r8 = _mm256_or_pd(r8,vONE);
        r9 = _mm256_or_pd(r9,vONE);
        rA = _mm256_or_pd(rA,vONE);
        rB = _mm256_or_pd(rB,vONE);

        c++;
    }

    r0 = _mm256_add_pd(r0,r1);
    r2 = _mm256_add_pd(r2,r3);
    r4 = _mm256_add_pd(r4,r5);
    r6 = _mm256_add_pd(r6,r7);
    r8 = _mm256_add_pd(r8,r9);
    rA = _mm256_add_pd(rA,rB);

    r0 = _mm256_add_pd(r0,r2);
    r4 = _mm256_add_pd(r4,r6);
    r8 = _mm256_add_pd(r8,rA);

    r0 = _mm256_add_pd(r0,r4);
    r0 = _mm256_add_pd(r0,r8);

    //  Prevent Dead Code Elimination
    double out = 0;
    __m256d temp = r0;
    out += ((double*)&temp)[0];
    out += ((double*)&temp)[1];
    out += ((double*)&temp)[2];
    out += ((double*)&temp)[3];

    return out;
}

void test_dp_mac_AVX(int tds,uint64 iterations){
    double *sum = (double*)malloc(tds * sizeof(double));
    double start = omp_get_wtime();

#pragma omp parallel num_threads(tds)
    {
        double ret = test_dp_mac_AVX(1.1,2.1,iterations);
        sum[omp_get_thread_num()] = ret;
    }

    double secs = omp_get_wtime() - start;
    uint64 ops = 48 * 1000 * iterations * tds * 4;
    cout << "Seconds = " << secs << endl;
    cout << "FP Ops  = " << ops << endl;
    cout << "FLOPs   = " << ops / secs << endl;

    double out = 0;
    int c = 0;
    while (c < tds){
        out += sum[c++];
    }

    cout << "sum = " << out << endl;
    cout << endl;

    free(sum);
}

int main(){
    //  (threads, iterations)
    test_dp_mac_AVX(8,10000000);
    system("pause");
}
```

**Output (1 thread, 10000000 iterations) - Compiled with Visual Studio 2010 SP1 - x64 Release:**

```
Seconds = 57.4679
FP Ops  = 1920000000000
FLOPs   = 3.34099e+010
sum = 4.45305
```

Theoretical AVX peak is 8 flops * 4.4 GHz = **35.2 GFlops**. Actual is **33.4 GFlops**.

**Output (8 threads, 10000000 iterations) - Compiled with Visual Studio 2010 SP1 - x64 Release:**

```
Seconds = 111.119
FP Ops  = 15360000000000
FLOPs   = 1.3823e+011
sum = 35.6244
```

Theoretical AVX peak is 8 flops * 4 cores * 4.4 GHz = **140.8 GFlops**. Actual is **138.2 GFlops**.

---

**Now for some explanations:**

The performance-critical part is obviously the 48 instructions inside the inner loop. You'll notice that they're broken into 4 blocks of 12 instructions each. Each of these 12-instruction blocks is completely independent of the others - and takes on average 6 cycles to execute.

So there are 12 instructions and 6 cycles between issue and use. The latency of multiplication is 5 cycles, so that's just enough to avoid latency stalls.
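To see why there are no stalls, here's the first 12-instruction block of the SSE inner loop again, with comments added for illustration:

```cpp
//  First block of the inner loop. All 12 instructions write different
//  registers, so they are mutually independent and can issue back-to-back.
r0 = _mm_mul_pd(r0,rC);   //  starts a 5-cycle multiply on r0
r1 = _mm_add_pd(r1,rD);   //  independent - different register
r2 = _mm_mul_pd(r2,rE);
r3 = _mm_sub_pd(r3,rF);
r4 = _mm_mul_pd(r4,rC);
r5 = _mm_add_pd(r5,rD);
r6 = _mm_mul_pd(r6,rE);
r7 = _mm_sub_pd(r7,rF);
r8 = _mm_mul_pd(r8,rC);
r9 = _mm_add_pd(r9,rD);
rA = _mm_mul_pd(rA,rE);
rB = _mm_sub_pd(rB,rF);

//  The next block is the first to read r0 again - about 6 cycles later,
//  by which time the 5-cycle multiply above has already completed:
r0 = _mm_add_pd(r0,rF);
```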
The normalization step is needed to keep the data from overflowing or underflowing. This is necessary because the otherwise do-nothing code slowly increases or decreases the magnitude of the data.
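Concretely, `MASK` (`0x800fffffffffffff`) keeps only the sign and mantissa bits of each double and zeroes the exponent, and OR-ing with 1.0 (`0x3ff0000000000000`) then restores the exponent field of 1.0, so every value snaps back into ±[1, 2). Here is a standalone scalar sketch of the same trick (the `renormalize` helper is mine, just for illustration):

```cpp
//  Standalone illustration of the renormalization trick (not from the benchmark).
#include <cstdint>
#include <cstring>
#include <cstdio>

double renormalize(double x){
    std::uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));  //  well-defined type pun
    bits &= 0x800fffffffffffffull;         //  keep sign + mantissa, zero exponent
    bits |= 0x3ff0000000000000ull;         //  force the exponent field of 1.0
    std::memcpy(&x, &bits, sizeof(bits));
    return x;
}

int main(){
    std::printf("%f\n", renormalize(1.0e300));   //  huge value pulled back into [1,2)
    std::printf("%f\n", renormalize(-3.0e-12));  //  tiny value pulled back into (-2,-1]
}
```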
So it's actually possible to do better than this if you just use all zeros and get rid of the normalization step. However, since I wrote the benchmark to measure power consumption and temperature, **I had to make sure the flops were on "real" data, rather than zeros** - the execution units may very well have special-case handling for zeros that uses less power and produces less heat.

---

## More Results:

- **Intel Core i7 920 @ 3.5 GHz**
- Windows 7 Ultimate x64
- Visual Studio 2010 SP1 - x64 Release

**Threads: 1**

```
Seconds = 72.1116
FP Ops  = 960000000000
FLOPs   = 1.33127e+010
sum = 2.22652
```

Theoretical SSE peak: 4 flops * 3.5 GHz = **14.0 GFlops**. Actual is **13.3 GFlops**.

**Threads: 8**

```
Seconds = 149.576
FP Ops  = 7680000000000
FLOPs   = 5.13452e+010
sum = 17.8122
```

Theoretical SSE peak: 4 flops * 4 cores * 3.5 GHz = **56.0 GFlops**. Actual is **51.3 GFlops**.

***My processor temps hit 76 C on the multi-threaded run! If you run these, be sure the results aren't affected by CPU throttling.***

---

- **2 x Intel Xeon X5482 Harpertown @ 3.2 GHz**
- Ubuntu Linux 10 x64
- GCC 4.5.2 x64 - (-O2 -msse3 -fopenmp)

**Threads: 1**

```
Seconds = 78.3357
FP Ops  = 960000000000
FLOPs   = 1.22549e+10
sum = 2.22652
```

Theoretical SSE peak: 4 flops * 3.2 GHz = **12.8 GFlops**. Actual is **12.3 GFlops**.

**Threads: 8**

```
Seconds = 78.4733
FP Ops  = 7680000000000
FLOPs   = 9.78676e+10
sum = 17.8122
```

Theoretical SSE peak: 4 flops * 8 cores * 3.2 GHz = **102.4 GFlops**. Actual is **97.9 GFlops**.
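Finally, regarding the FMA note at the top: on Haswell/Bulldozer and later, peak FLOPS requires fused multiply-adds, since each FMA counts as two operations per element. Purely as a hedged sketch (not part of this benchmark - `fma_block` is a made-up helper, and a real peak test would need the same unrolling and renormalization as above), one inner-loop block might look like this:

```cpp
//  Hypothetical FMA variant of one inner-loop block (requires an FMA-capable
//  CPU and e.g. -mfma on GCC). Each instruction performs 4 multiplies + 4 adds
//  on a 256-bit vector, i.e. 8 double-precision operations.
#include <immintrin.h>

void fma_block(__m256d& r0, __m256d& r1, __m256d& r2, __m256d& r3,
               __m256d rC, __m256d rD){
    r0 = _mm256_fmadd_pd(r0, rC, rD);   //  r0 = r0*rC + rD
    r1 = _mm256_fmadd_pd(r1, rC, rD);
    r2 = _mm256_fmsub_pd(r2, rC, rD);   //  r2 = r2*rC - rD
    r3 = _mm256_fmsub_pd(r3, rC, rD);
}
```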