# Invocation command line: # /home/cpu2017_B1/bin/harness/runcpu --configfile amd_speed_aocc300_milan_B1.cfg --tune all --reportable --iterations 3 --nopower --runmode speed --tune base:peak --size test:train:refspeed intspeed # output_root was not used for this run ############################################################################ ################################################################################ # AMD AOCC 300 SPEC CPU2017 V1.1.5 Speed Configuration File for 64-bit Linux # # File name : amd_speed_aocc300_milan_B1.cfg # Creation Date : February 23, 2021 # CPU2017 Version : 1.1.5 # Supported benchmarks : All Speed benchmarks (intspeed, fpspeed) # Compiler name/version : AOCC 3.0.0 # Operating system version : OpenSUSE 15.2 # Supported OS's : Ubuntu 20.04, RHEL 8.3, SLES 15 SP2 # Hardware : AMD Milan, Rome, Naples (AMD64) # FP Base Pointer Size : 64-bit # FP Peak Pointer Size : 64-bit # INT Base Pointer Size : 64-bit # INT Peak Pointer Size : 64-bit # Auto Parallization : No # # Note: DO NOT EDIT THIS FILE, the only edits required to properly run these # binaries are made in the ini Python file. Please consult Readme.amd_speed_aocc300_milan_B1.txt # for a few uncommon exceptions which require edits to this file. # # Description: # # This binary package automates away many of the complexities necessary to set # up and run SPEC CPU2017 under optimized conditions on AMD Milan/Rome/Naples-based # server platforms within Linux (AMD64). # # The binary package was built specifically for AMD Milan/Rome/Naples microprocessors and # is not intended to run on other products. # # Please install the binary package by following the instructions in # "Readme.amd_speed_aocc300_milan_B1.txt" under the "How To Use the Binaries" section. 
# # The binary package is designed to work without alteration on two socket AMD # Milan/Rome/Naples-based servers with 64 cores per socket, SMT enabled and 1 TiB of DDR4 # memory distributed evenly among all 16 channels using 64 GiB DIMMs. # # To run the binary package on other Milan/Rome/Naples configurations, please review # "Readme.amd_speed_aocc300_milan_B1.txt". In general, Milan/Rome or Naples CPUs # should be autodetected with no action required by the user. # # In most cases, it should be unnecessary to edit "amd_speed_aocc300_milan_B1.cfg" or any # other file besides "ini_amd_speed_aocc300_milan_B1.py" where reporting fields # and run conditions are set. # # The run script automatically sets the optimal number of speed copies and binds # them appropriately. # # The run script and accompanying binary package are designed to work on Ubuntu # 20.04, RHEL 8.3 and SLES 15 SP2. # # Important! If you write your own run script, please set the stack size to # "unlimited" when executing this binary package. Failure to do so may cause # some benchmarks to overflow the stack. For example, to set stack size within # the bash shell, include the following line somewhere at the top of your run # script before the runcpu invocation: # # ulimit -s unlimited # # Modification of this config file should only be necessary if you intend to # rebuild the binaries. General instructions for rebuilding the binaries are # found in-line below. # ################################################################################ # Modifiable macros: ################################################################################ # Change the following line to true if you intend to REBUILD the binaries (AMD # does not support this). Valid values are "true" or "false" (no quotes). 
%define allow_build false # Only change these macros if you are rebuilding the binary package: %define compiler_name aocc300 %define binary_package_name amd_speed_%{compiler_name}_milan_B %define binary_package_revision 1 %define build_path /sppo/bin/cpu2017v115aocc3-b1/ %define flags_file_name %{compiler_name}-flags-B1.xml # To enable the platform file, be sure to uncomment the flagsurl02 header line # below. %define platform_file_name INVALID_platform_%{binary_package_name}.xml # You should never have to change binary_package_full_name: %define binary_package_full_name %{binary_package_name}%{binary_package_revision} ################################################################################ # Include file name ################################################################################ # The include file contains fields that are commonly changed. This file is auto- # generated based upon INI file settings and should not need user modification # for runs. %define inc_file_name %{binary_package_full_name}.inc ################################################################################ # Binary label extension and "allow_build" switch ################################################################################ # Only modify the binary label extension if you plan to rebuild the binaries. %define ext %{binary_package_name} # If you plan to recompile these CPU2017 binaries, please choose a new extension # name (ext above) to avoid confusion with the current binary set on your system # under test, and to avoid confusion for SPEC submission reviewers. You will # also need to set "allow_build" to true below. Finally, you must modify the # Paths section below to point to your library locations if the paths are not # already set up in your build environment. 
################################################################################ # Paths and Environment Variables # ** MODIFY AS NEEDED (modification should not be necessary for runs) ** ################################################################################ # Allow environment variables to be set before runs: preenv = 1 # Necessary to avoid gcc out-of-memory exceptions on certain SUTs: preENV_MALLOC_CONF = retain:true # OpenMP environment variables: preENV_OMP_SCHEDULE = static preENV_OMP_DYNAMIC = false preENV_OMP_STACKSIZE = 128M # Define the name of the directory that holds AMD library files: %define lib_dir %{binary_package_name}_lib # Set the shared object library path for runs and builds: preENV_LD_LIBRARY_PATH = $[top]/%{lib_dir}/64:$[top]/%{lib_dir}/32:%{ENV_LD_LIBRARY_PATH} # Define 32-bit library build paths: # Do not use $[top] with the 32-bit libraries because doing so will cause an # options checksum error triggering a xalanc recompile attempt on SUTs having # different file paths: JEMALLOC_LIB32_PATH = %{build_path}%{lib_dir}/32 %if '%{allow_build}' eq 'false' # The include file is only needed for runs, but not for builds. # include: %{inc_file_name} # ----- Begin inclusion of 'amd_speed_aocc300_milan_B1.inc' ############################################################################ ################################################################################ ################################################################################ # File name: amd_speed_aocc300_milan_B1.inc # File generation code date: January 25, 2021 # File generation date/time: April 01, 2020 / 12:26:01 # # This file is automatically generated during a SPEC CPU2017 run. # # To modify inc file generation, please consult the readme file or the run # script. 
################################################################################ ################################################################################ ################################################################################ ################################################################################ # The following macros are generated for use in the cfg file. ################################################################################ ################################################################################ %define logical_core_count 128 %define physical_core_count 128 %define physical_core_max 127 %define logical_core_max 127 ################################################################################ ################################################################################ # The following inc blocks set the speed thread counts and affinity settings. # # intspeed benchmarks: 600.perlbench_s,602.gcc_s,605.mcf_s,620.omnetpp_s, # 623.xalancbmk_s,625.x264_s,631.deepsjeng_s,641.leela_s,648.exchange2_s, # 657.xz_s # fpspeed benchmarks: 603.bwaves_s,607.cactuBSSN_s,619.lbm_s,621.wrf_s, # 627.cam4_s,628.pop2_s,638.imagick_s,644.nab_s,649.fotonik3d_s, # 654.roms_s # # Selected thread counts from '7713' section of CPU info ################################################################################ # default preENV thread settings: default: preENV_OMP_THREAD_LIMIT = 128 preENV_GOMP_CPU_AFFINITY = 0-127 ################################################################################ ################################################################################ # intspeed base thread counts: intspeed=base: threads = 128 ENV_GOMP_CPU_AFFINITY = 0-127 bind0 = numactl --physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # fpspeed base thread 
counts: fpspeed=base: threads = 128 ENV_GOMP_CPU_AFFINITY = 0-127 bind0 = numactl --physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # peak thread counts: 1 600.perlbench_s,602.gcc_s,605.mcf_s,620.omnetpp_s,623.xalancbmk_s,625.x264_s,631.deepsjeng_s,641.leela_s,648.exchange2_s=peak: threads = 1 ENV_GOMP_CPU_AFFINITY = 0 bind0 = numactl --physcpubind=0 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # peak thread counts: 128 603.bwaves_s,607.cactuBSSN_s,619.lbm_s,621.wrf_s,627.cam4_s,628.pop2_s,638.imagick_s,649.fotonik3d_s,654.roms_s,657.xz_s=peak: threads = 128 ENV_GOMP_CPU_AFFINITY = 0-127 bind0 = numactl --physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ # peak thread counts: 128 644.nab_s=peak: threads = 128 ENV_GOMP_CPU_AFFINITY = 0 64 1 65 2 66 3 67 4 68 5 69 6 70 7 71 8 72 9 73 10 74 11 75 12 76 13 77 14 78 15 79 16 80 17 81 18 82 19 83 20 84 21 85 22 86 23 87 24 88 25 89 26 90 27 91 28 92 29 93 30 94 31 95 32 96 33 97 34 98 35 99 36 100 37 101 38 102 39 103 40 104 41 105 42 106 43 107 44 108 45 109 46 110 47 111 48 112 49 113 50 114 51 115 52 116 53 117 54 118 55 119 56 120 57 121 58 122 59 123 60 124 61 125 62 126 63 127 bind0 = numactl --physcpubind=0-127 submit = echo "$command" > run.sh ; $BIND bash run.sh ################################################################################ ################################################################################ 
################################################################################ # Switch back to default: default: ################################################################################ ################################################################################ ################################################################################ # The remainder of this file defines CPU2017 report parameters. ################################################################################ ################################################################################ ################################################################################ # SPEC CPU 2017 report header ################################################################################ license_num =3 tester =HPE test_sponsor =HPE hw_vendor =Hewlett Packard Enterprise hw_model000 =ProLiant DL385 Gen10 Plus v2 hw_model001 =(2.00 GHz, AMD EPYC 7713) #--------- If you install new compilers, edit this section -------------------- sw_compiler =C/C++/Fortran: Version 3.0.0 of AOCC ################################################################################ ################################################################################ # Hardware, firmware and software information ################################################################################ hw_avail =Apr-2021 sw_avail =Mar-2021 hw_cpu_name =AMD EPYC 7713 hw_cpu_nominal_mhz =2000 hw_cpu_max_mhz =3675 hw_ncores =128 hw_nthreadspercore =1 hw_ncpuorder =1, 2 chip(s) hw_other =None # Other perf-relevant hw, or "None" fw_bios000 =HPE BIOS Version A42 v2.40 02/15/2021 released fw_bios001 = Feb-2021 sw_base_ptrsize =64-bit sw_peak_ptrsize = 64-bit hw_pcache =32 KB I + 32 KB D on chip per core hw_scache =512 KB I+D on chip per core hw_tcache000 =256 MB I+D on chip per chip, 32 MB shared / 8 hw_tcache001 = cores hw_ocache =None ################################################################################ # Notes 
################################################################################ # Enter notes_000 through notes_100 here. notes_000 =Binaries were compiled on a system with 2x AMD EPYC 7742 CPU + 1TiB Memory using openSUSE 15.2 notes_005 = notes_010 =NA: The test sponsor attests, as of date of publication, that CVE-2017-5754 (Meltdown) notes_015 =is mitigated in the system as tested and documented. notes_020 =Yes: The test sponsor attests, as of date of publication, that CVE-2017-5753 (Spectre variant 1) notes_025 =is mitigated in the system as tested and documented. notes_030 =Yes: The test sponsor attests, as of date of publication, that CVE-2017-5715 (Spectre variant 2) notes_035 =is mitigated in the system as tested and documented. notes_040 = notes_submit_000 ='numactl' was used to bind copies to the cores. notes_submit_005 =See the configuration file for details. notes_os_000 ='ulimit -s unlimited' was used to set environment stack size notes_os_005 ='ulimit -l 2097152' was used to set environment locked pages in memory limit notes_os_010 = notes_os_015 =runcpu command invoked through numactl i.e.: notes_os_020 =numactl --interleave=all runcpu notes_os_025 = notes_os_030 ='echo 8 > /proc/sys/vm/dirty_ratio' run as root to limit dirty cache to 8% of notes_os_035 =memory. notes_os_040 ='echo 1 > /proc/sys/vm/swappiness' run as root to limit swap usage to minimum notes_os_045 =necessary. notes_os_050 ='echo 1 > /proc/sys/vm/zone_reclaim_mode' run as root to free node-local memory notes_os_055 =and avoid remote memory usage. notes_os_060 ='sync; echo 3 > /proc/sys/vm/drop_caches' run as root to reset filesystem caches. notes_os_065 ='sysctl -w kernel.randomize_va_space=0' run as root to disable address space layout notes_os_070 =randomization (ASLR) to reduce run-to-run variability. 
notes_os_075 ='echo always > /sys/kernel/mm/transparent_hugepage/enabled' and notes_os_100 =For peak, transparent huge pages set to 'never' for 627, 644, 649, 654 notes_os_080 ='echo always > /sys/kernel/mm/transparent_hugepage/defrag' run as root to enable notes_os_085 =Transparent Hugepages (THP) for this run. notes_os_090 ='echo madvise > /sys/kernel/mm/transparent_hugepage/enabled' run as root for peak notes_os_095 =runs of 628.pop2_s and 638.imagick_s to enable THP only on request. notes_comp_000 =The AMD64 AOCC Compiler Suite is available at notes_comp_005 =http://developer.amd.com/amd-aocc/ notes_comp_010 = notes_jemalloc_000 =jemalloc: configured and built with GCC v4.8.2 in RHEL 7.4 notes_jemalloc_005 =jemalloc 5.1.0 is available here: notes_jemalloc_010 =https://github.com/jemalloc/jemalloc/releases/download/5.1.0/jemalloc-5.1.0.tar.bz2 notes_jemalloc_015 = sw_other =jemalloc: jemalloc memory allocator library v5.1.0 ################################################################################ # The following note fields describe platform settings. ################################################################################ # example: (uncomment as necessary) # notes_plat_000 =BIOS settings: # notes_plat_002 = cTDP: 280 # notes_plat_004 = Determinism Slider set to Power # notes_plat_006 = Package Power: 280 # notes_plat_008 = EDC: 300 # notes_plat_010 = NPS: 1 # notes_plat_014 = 4-link xGMI max speed: 16Gbps # notes_plat_015 = Fan Speed: Maximum ################################################################################ # The following are custom fields: ################################################################################ # Use custom_fields to enter lines that are not listed here. 
For example: # notes_plat_100 = Energy Bias set to Max Performance # new_field = Ambient temperature set to 10C ################################################################################ # The following fields must be set here for only Int benchmarks. ################################################################################ intspeed: sw_peak_ptrsize =64-bit ################################################################################ # The following fields must be set here for FP benchmarks. ################################################################################ fpspeed: sw_peak_ptrsize =64-bit ################################################################################ # The following fields must be set here or they will be overwritten by sysinfo. ################################################################################ intspeed,fpspeed: hw_disk =unknown hw_memory000 =1 TB (16 x 64 GB 4Rx4 PC4-3200V-L) hw_memory001 =running at 2933 hw_memory002 = hw_nchips =2 prepared_by =prepared by unknown sw_file =unknown file sw_os000 =unknown os000 sw_os001 =kernel version # ex: Kernel 4.4.0-87-generic sw_state =Run level 3 (multi-user) ################################################################################ # End of inc file ################################################################################ # Switch back to the default block after the include file: default: # ---- End inclusion of '/home/cpu2017_B1/config/amd_speed_aocc300_milan_B1.inc' # Switch back to default block after the include file: default: fail_build = 1 %elif '%{allow_build}' eq 'true' # If you intend to rebuild, be sure to set the library paths either in the # build script or here: preENV_LIBRARY_PATH = $[top]/%{lib_dir}/64:$[top]/%{lib_dir}/32:%{ENV_LIBRARY_PATH} % define build_ncpus 64 # controls number of simultaneous compiles fail_build = 0 makeflags = --jobs=%{build_ncpus} --load-average=%{build_ncpus} %else % error The value of "allow_build" is 
%{allow_build}, but it can only be "true" or "false". This error was generated %endif ################################################################################ # Enable automated data collection per benchmark ################################################################################ # Data collection is not enabled for reportable runs. # teeout is necessary to get data collection stdout into the logs. Best # practices for the individual data collection items would be to have # them store important output in separate files. Filenames could be # constructed from $SPEC (environment), $lognum (result number from runcpu), # and benchmark name/number. teeout = yes # Run runcpu with '-v 35' (or greater) to log lists of variables which can # be used in substitutions as below. # For CPU2006, change $label to $ext %define data-collection-parameters benchname='$name' benchnum='$num' benchmark='$benchmark' iteration=$iter size='$size' tune='$tune' label='$label' log='$log' lognum='$lognum' from_runcpu='$from_runcpu' %define data-collection-start $[top]/data-collection/data-collection start %{data-collection-parameters} %define data-collection-stop $[top]/data-collection/data-collection stop %{data-collection-parameters} monitor_specrun_wrapper = %{data-collection-start} ; $command ; %{data-collection-stop} ################################################################################ # Header settings ################################################################################ backup_config = 0 # set to 0 if you do not want backup files bench_post_setup = sync # command_add_redirect: If set, the generated ${command} will include # redirection operators (stdout, stderr), which are passed along to the shell # that executes the command. If this variable is not set, specinvoke does the # redirection. 
command_add_redirect = yes env_vars = yes flagsurl000 = http://www.spec.org/cpu2017/flags/HPE-Platform-Flags-AMD-V1.2-EPYC-revP.xml flagsurl001 = http://www.spec.org/cpu2017/flags/aocc300-flags-B1.xml #flagsurl02 = $[top]/%{platform_file_name} # label: User defined extension string that tags your binaries & directories: label = %{ext} line_width = 1020 log_line_width = 1020 mean_anyway = yes output_format = all reportable = yes size = test,train,ref teeout = yes teerunout = yes tune = base,peak use_submit_for_speed = yes ################################################################################ # Compilers ################################################################################ default: CROSSPLAT_PORT_OPTS = -mno-adx -mno-sse4a CC = clang -m64 $[CROSSPLAT_PORT_OPTS] CXX = clang++ -m64 -std=c++98 $[CROSSPLAT_PORT_OPTS] FC = flang -m64 $[CROSSPLAT_PORT_OPTS] CLD = clang -m64 CXXLD = clang++ -m64 FLD = flang -m64 CC_VERSION_OPTION = --version CXX_VERSION_OPTION = --version FC_VERSION_OPTION = --version ################################################################################ # Portability Flags ################################################################################ default:# data model applies to all benchmarks ################################################################################ # Default Flags ################################################################################ EXTRA_LIBS = -fopenmp=libomp -lomp -ljemalloc -lamdlibm -lm MATHLIBOPT = #clearing this variable or else SPEC will set it to -lm VECMATHLIB = -fveclib=AMDLIBM OPT_ROOT = -march=znver3 $(VECMATHLIB) -ffast-math OPT_ROOT_BASE = -O3 $(OPT_ROOT) OPT_ROOT_PEAK = -Ofast $(OPT_ROOT) -flto ################################################################################ # Portability Flags ################################################################################ default: EXTRA_PORTABILITY = -DSPEC_LP64 # *** Benchmark-specific portability *** # Anything other 
than the data model is only allowed where a need is proven. # (ordered by last 2 digits of benchmark number) 600.perlbench_s: #lang='C' PORTABILITY = -DSPEC_LINUX_X64 621.wrf_s: #lang='F,C' CPORTABILITY = -DSPEC_CASE_FLAG FPORTABILITY = -Mbyteswapio 623.xalancbmk_s: #lang='CXX' PORTABILITY = -DSPEC_LINUX 627.cam4_s: #lang='F,C' PORTABILITY = -DSPEC_CASE_FLAG 628.pop2_s: #lang='F,C' CPORTABILITY = -DSPEC_CASE_FLAG FPORTABILITY = -Mbyteswapio ################################################################################ # Tuning Flags ################################################################################ ##################### # Base tuning flags # ##################### default=base: COPTIMIZE = $(OPT_ROOT_BASE) -flto -fstruct-layout=5 \ -mllvm -unroll-threshold=50 \ -mllvm -inline-threshold=1000 -fremap-arrays \ -mllvm -function-specialize -flv-function-specialization \ -mllvm -enable-gvn-hoist \ -mllvm -global-vectorize-slp=true \ -mllvm -enable-licm-vrp \ -mllvm -reduce-array-computations=3 \ -Wno-unused-command-line-argument CXXOPTIMIZE = $(OPT_ROOT_BASE) -flto \ -mllvm -enable-partial-unswitch \ -mllvm -unroll-threshold=100 \ -finline-aggressive -flv-function-specialization \ -mllvm -loop-unswitch-threshold=200000 \ -mllvm -reroll-loops \ -mllvm -aggressive-loop-unswitch \ -mllvm -extra-vectorizer-passes \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -Wno-unused-command-line-argument \ -mllvm -convert-pow-exp-to-int=false FOPTIMIZE = -Hz,1,0x1 $(OPT_ROOT_BASE) -Mrecursive \ -mllvm -fuse-tile-inner-loop -funroll-loops \ -mllvm -extra-vectorizer-passes \ -mllvm -lsr-in-nested-loop \ -mllvm -enable-licm-vrp \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -Wno-unused-command-line-argument LDCXXFLAGS = -Wl,-mllvm -Wl,-x86-use-vzeroupper=false EXTRA_LDFLAGS = -Wl,-mllvm -Wl,-region-vectorize \ -Wl,-mllvm -Wl,-function-specialize \ -Wl,-mllvm -Wl,-align-all-nofallthru-blocks=6 \ -Wl,-mllvm 
-Wl,-reduce-array-computations=3 LDFFLAGS = -Wl,-mllvm -Wl,-enable-X86-prefetching \ -Wl,-mllvm -Wl,-enable-licm-vrp #other libraries # Put OpenMP and math libraries here: # -lm needed at the end for some transcendental functions: EXTRA_LIBS = -fopenmp=libomp -lomp -lamdlibm -ljemalloc -lflang -lflangrti -lm EXTRA_FLIBS = # Don't put the AMD and mvec math libraries in MATHLIBOPT because it will trigger a reporting issue # because GCC won't use them. Forcefeed all benchmarks the math libraries in EXTRA_LIBS and clear # out MATHLIBOPT. MATHLIBOPT = # The following is necessary for 502/602 gcc: LDOPTIMIZE = -z muldefs # The following is necessary for 502/602 gcc: EXTRA_OPTIMIZE = -DSPEC_OPENMP -fopenmp -Wno-return-type ######################## # intspeed tuning flags # ######################## intspeed: FOPTIMIZE = $(OPT_ROOT_BASE) -flto EXTRA_FFLAGS = -mllvm -unroll-aggressive \ -mllvm -unroll-threshold=150 EXTRA_CXXFLAGS = -mllvm -do-block-reorder=aggressive \ -fvirtual-function-elimination -fvisibility=hidden LDCFLAGS = -Wl,-allow-multiple-definition -Wl,-mllvm \ -Wl,-enable-licm-vrp LDCXXFLAGS = -Wl,-mllvm -Wl,-do-block-reorder=aggressive LDFFLAGS = -Wl,-mllvm -Wl,-inline-recursion=4 \ -Wl,-mllvm -Wl,-lsr-in-nested-loop \ -Wl,-mllvm -Wl,-enable-iv-split intspeed=base: submit = echo always > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command intspeed=peak: submit = echo always > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command ######################## # fpspeed tuning flags # ######################## fpspeed: CXX = clang++ -m64 -std=c++98 $[CROSSPLAT_PORT_OPTS] fpspeed=base: submit = echo always > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command fpspeed=peak: submit = echo always > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command ##################### # Peak tuning flags # ##################### default=peak: COPTIMIZE = $(OPT_ROOT_PEAK) -fstruct-layout=5 \ -mllvm -unroll-threshold=50 -fremap-arrays \ 
-flv-function-specialization -mllvm \ -inline-threshold=1000 -mllvm -enable-gvn-hoist \ -mllvm -global-vectorize-slp=true -mllvm \ -function-specialize -mllvm -enable-licm-vrp \ -mllvm -reduce-array-computations=3 \ -Wno-unused-command-line-argument CXXOPTIMIZE = $(OPT_ROOT_PEAK) -finline-aggressive \ -mllvm -unroll-threshold=100 \ -flv-function-specialization -mllvm -enable-licm-vrp \ -mllvm -reroll-loops -mllvm \ -aggressive-loop-unswitch -mllvm \ -reduce-array-computations=3 -mllvm \ -global-vectorize-slp=true \ -Wno-unused-command-line-argument FOPTIMIZE = $(OPT_ROOT_PEAK) -Mrecursive \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -mllvm -enable-licm-vrp \ -Wno-unused-command-line-argument EXTRA_LDFLAGS = -Wl,-mllvm -Wl,-function-specialize \ -Wl,-mllvm -Wl,-align-all-nofallthru-blocks=6 \ -Wl,-mllvm -Wl,-reduce-array-computations=3 LDFFLAGS = -Wl,-mllvm -Wl,-enable-X86-prefetching \ -Wl,-mllvm -Wl,-enable-licm-vrp LDCXXFLAGS = -Wl,-mllvm -Wl,-x86-use-vzeroupper=false \ -Wl,-mllvm -Wl,-enable-licm-vrp EXTRA_LIBS = -fopenmp=libomp -lomp -lamdlibm -ljemalloc -lflang -lm EXTRA_OPTIMIZE = -DSPEC_OPENMP -fopenmp -Wno-return-type feedback = 0 PASS1_CFLAGS = -fprofile-instr-generate PASS2_CFLAGS = -fprofile-instr-use PASS1_FFLAGS = -fprofile-generate PASS2_FFLAGS = -fprofile-use PASS1_CXXFLAGS = -fprofile-instr-generate PASS2_CXXFLAGS = -fprofile-instr-use PASS1_LDFLAGS = -fprofile-instr-generate PASS2_LDFLAGS = -fprofile-instr-use fdo_run1 = $command ; llvm-profdata merge --output=default.profdata *.profraw # Int benchmark specific peak tuning flags: # FP benchmark specific peak tuning flags: 603.bwaves_s=peak: FOPTIMIZE = -Ofast $(OPT_ROOT) -Mrecursive \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -mllvm -enable-licm-vrp \ -Wno-unused-command-line-argument submit = echo always > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 607.cactuBSSN_s=peak: submit = echo always > 
/sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 621.wrf_s=peak: FOPTIMIZE = -Hz,1,0x1 $(OPT_ROOT_BASE) -Mrecursive \ -mllvm -fuse-tile-inner-loop -funroll-loops \ -mllvm -extra-vectorizer-passes \ -mllvm -lsr-in-nested-loop \ -mllvm -enable-licm-vrp \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -Wno-unused-command-line-argument submit = echo always > /sys/kernel/mm/transparent_hugepage/enabled; numactl --interleave=all --physcpubind=0-%{physical_core_max} $command 627.cam4_s=peak: submit = echo never > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 628.pop2_s=peak: FOPTIMIZE = $(OPT_ROOT) -Ofast -Mrecursive \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -mllvm -enable-licm-vrp \ -Wno-unused-command-line-argument submit = echo madvise > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 638.imagick_s=peak: submit = echo madvise > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 644.nab_s=peak: EXTRA_LDFLAGS = -Wl,-mllvm -Wl,-region-vectorize \ -Wl,-mllvm -Wl,-function-specialize submit = echo never > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 649.fotonik3d_s=peak: ENV_PGHPF_ZMEM =yes submit = echo never > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command 654.roms_s=peak: FOPTIMIZE = -Ofast $(OPT_ROOT) -Mrecursive \ -mllvm -reduce-array-computations=3 \ -mllvm -global-vectorize-slp=true \ -mllvm -enable-licm-vrp \ -Wno-unused-command-line-argument submit = echo never > /sys/kernel/mm/transparent_hugepage/enabled; $BIND $command # The following settings were obtained by running the sysinfo_program # 'specperl $[top]/bin/sysinfo' (sysinfo:SHA:60a26e139a7df7ba5521c983304469c762a79f3394ac112dddae4bac7d1a4f55) default: notes_plat_sysinfo_000 = notes_plat_sysinfo_005 = Sysinfo program /home/cpu2017_B1/bin/sysinfo notes_plat_sysinfo_010 = Rev: r6538 of 2020-09-24 e8664e66d2d7080afeaa89d4b38e2f1c notes_plat_sysinfo_015 = running on dl385g10v2 Wed 
Apr 1 12:26:11 2020 notes_plat_sysinfo_020 = notes_plat_sysinfo_025 = SUT (System Under Test) info as seen by some common utilities. notes_plat_sysinfo_030 = For more information on this section, see notes_plat_sysinfo_035 = https://www.spec.org/cpu2017/Docs/config.html#sysinfo notes_plat_sysinfo_040 = notes_plat_sysinfo_045 = From /proc/cpuinfo notes_plat_sysinfo_050 = model name : AMD EPYC 7713 64-Core Processor notes_plat_sysinfo_055 = 2 "physical id"s (chips) notes_plat_sysinfo_060 = 128 "processors" notes_plat_sysinfo_065 = cores, siblings (Caution: counting these is hw and system dependent. The following notes_plat_sysinfo_070 = excerpts from /proc/cpuinfo might not be reliable. Use with caution.) notes_plat_sysinfo_075 = cpu cores : 64 notes_plat_sysinfo_080 = siblings : 64 notes_plat_sysinfo_085 = physical 0: cores 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 notes_plat_sysinfo_090 = 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 notes_plat_sysinfo_095 = 53 54 55 56 57 58 59 60 61 62 63 notes_plat_sysinfo_100 = physical 1: cores 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 notes_plat_sysinfo_105 = 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 notes_plat_sysinfo_110 = 53 54 55 56 57 58 59 60 61 62 63 notes_plat_sysinfo_115 = notes_plat_sysinfo_120 = From lscpu: notes_plat_sysinfo_125 = Architecture: x86_64 notes_plat_sysinfo_130 = CPU op-mode(s): 32-bit, 64-bit notes_plat_sysinfo_135 = Byte Order: Little Endian notes_plat_sysinfo_140 = Address sizes: 48 bits physical, 48 bits virtual notes_plat_sysinfo_145 = CPU(s): 128 notes_plat_sysinfo_150 = On-line CPU(s) list: 0-127 notes_plat_sysinfo_155 = Thread(s) per core: 1 notes_plat_sysinfo_160 = Core(s) per socket: 64 notes_plat_sysinfo_165 = Socket(s): 2 notes_plat_sysinfo_170 = NUMA node(s): 16 notes_plat_sysinfo_175 = Vendor ID: AuthenticAMD notes_plat_sysinfo_180 = CPU family: 25 
notes_plat_sysinfo_185 = Model: 1 notes_plat_sysinfo_190 = Model name: AMD EPYC 7713 64-Core Processor notes_plat_sysinfo_195 = Stepping: 1 notes_plat_sysinfo_200 = CPU MHz: 1796.469 notes_plat_sysinfo_205 = BogoMIPS: 3992.57 notes_plat_sysinfo_210 = Virtualization: AMD-V notes_plat_sysinfo_215 = L1d cache: 4 MiB notes_plat_sysinfo_220 = L1i cache: 4 MiB notes_plat_sysinfo_225 = L2 cache: 64 MiB notes_plat_sysinfo_230 = L3 cache: 512 MiB notes_plat_sysinfo_235 = NUMA node0 CPU(s): 0-7 notes_plat_sysinfo_240 = NUMA node1 CPU(s): 8-15 notes_plat_sysinfo_245 = NUMA node2 CPU(s): 16-23 notes_plat_sysinfo_250 = NUMA node3 CPU(s): 24-31 notes_plat_sysinfo_255 = NUMA node4 CPU(s): 32-39 notes_plat_sysinfo_260 = NUMA node5 CPU(s): 40-47 notes_plat_sysinfo_265 = NUMA node6 CPU(s): 48-55 notes_plat_sysinfo_270 = NUMA node7 CPU(s): 56-63 notes_plat_sysinfo_275 = NUMA node8 CPU(s): 64-71 notes_plat_sysinfo_280 = NUMA node9 CPU(s): 72-79 notes_plat_sysinfo_285 = NUMA node10 CPU(s): 80-87 notes_plat_sysinfo_290 = NUMA node11 CPU(s): 88-95 notes_plat_sysinfo_295 = NUMA node12 CPU(s): 96-103 notes_plat_sysinfo_300 = NUMA node13 CPU(s): 104-111 notes_plat_sysinfo_305 = NUMA node14 CPU(s): 112-119 notes_plat_sysinfo_310 = NUMA node15 CPU(s): 120-127 notes_plat_sysinfo_315 = Vulnerability Itlb multihit: Not affected notes_plat_sysinfo_320 = Vulnerability L1tf: Not affected notes_plat_sysinfo_325 = Vulnerability Mds: Not affected notes_plat_sysinfo_330 = Vulnerability Meltdown: Not affected notes_plat_sysinfo_335 = Vulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via notes_plat_sysinfo_340 = prctl and seccomp notes_plat_sysinfo_345 = Vulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user notes_plat_sysinfo_350 = pointer sanitization notes_plat_sysinfo_355 = Vulnerability Spectre v2: Mitigation; Full AMD retpoline, IBPB conditional, notes_plat_sysinfo_360 = IBRS_FW, STIBP disabled, RSB filling notes_plat_sysinfo_365 = Vulnerability 
Srbds: Not affected notes_plat_sysinfo_370 = Vulnerability Tsx async abort: Not affected notes_plat_sysinfo_375 = Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr notes_plat_sysinfo_380 = pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt notes_plat_sysinfo_385 = pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid notes_plat_sysinfo_390 = aperfmperf pni pclmulqdq monitor ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe notes_plat_sysinfo_395 = popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a notes_plat_sysinfo_400 = misalignsse 3dnowprefetch osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb notes_plat_sysinfo_405 = bpext perfctr_llc mwaitx cpb cat_l3 cdp_l3 invpcid_single hw_pstate ssbd mba ibrs notes_plat_sysinfo_410 = ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 invpcid cqm rdt_a rdseed adx smap notes_plat_sysinfo_415 = clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves cqm_llc cqm_occup_llc notes_plat_sysinfo_420 = cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr wbnoinvd arat npt lbrv svm_lock notes_plat_sysinfo_425 = nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold notes_plat_sysinfo_430 = v_vmsave_vmload vgif umip pku ospke vaes vpclmulqdq rdpid overflow_recov succor smca notes_plat_sysinfo_435 = notes_plat_sysinfo_440 = /proc/cpuinfo cache data notes_plat_sysinfo_445 = cache size : 512 KB notes_plat_sysinfo_450 = notes_plat_sysinfo_455 = From numactl --hardware WARNING: a numactl 'node' might or might not correspond to a notes_plat_sysinfo_460 = physical chip. 
notes_plat_sysinfo_465 = available: 16 nodes (0-15) notes_plat_sysinfo_470 = node 0 cpus: 0 1 2 3 4 5 6 7 notes_plat_sysinfo_475 = node 0 size: 128775 MB notes_plat_sysinfo_480 = node 0 free: 128511 MB notes_plat_sysinfo_485 = node 1 cpus: 8 9 10 11 12 13 14 15 notes_plat_sysinfo_490 = node 1 size: 129022 MB notes_plat_sysinfo_495 = node 1 free: 128909 MB notes_plat_sysinfo_500 = node 2 cpus: 16 17 18 19 20 21 22 23 notes_plat_sysinfo_505 = node 2 size: 129022 MB notes_plat_sysinfo_510 = node 2 free: 128866 MB notes_plat_sysinfo_515 = node 3 cpus: 24 25 26 27 28 29 30 31 notes_plat_sysinfo_520 = node 3 size: 129022 MB notes_plat_sysinfo_525 = node 3 free: 128887 MB notes_plat_sysinfo_530 = node 4 cpus: 32 33 34 35 36 37 38 39 notes_plat_sysinfo_535 = node 4 size: 129022 MB notes_plat_sysinfo_540 = node 4 free: 128914 MB notes_plat_sysinfo_545 = node 5 cpus: 40 41 42 43 44 45 46 47 notes_plat_sysinfo_550 = node 5 size: 129022 MB notes_plat_sysinfo_555 = node 5 free: 128884 MB notes_plat_sysinfo_560 = node 6 cpus: 48 49 50 51 52 53 54 55 notes_plat_sysinfo_565 = node 6 size: 129022 MB notes_plat_sysinfo_570 = node 6 free: 128871 MB notes_plat_sysinfo_575 = node 7 cpus: 56 57 58 59 60 61 62 63 notes_plat_sysinfo_580 = node 7 size: 116909 MB notes_plat_sysinfo_585 = node 7 free: 116748 MB notes_plat_sysinfo_590 = node 8 cpus: 64 65 66 67 68 69 70 71 notes_plat_sysinfo_595 = node 8 size: 129022 MB notes_plat_sysinfo_600 = node 8 free: 128880 MB notes_plat_sysinfo_605 = node 9 cpus: 72 73 74 75 76 77 78 79 notes_plat_sysinfo_610 = node 9 size: 129022 MB notes_plat_sysinfo_615 = node 9 free: 128870 MB notes_plat_sysinfo_620 = node 10 cpus: 80 81 82 83 84 85 86 87 notes_plat_sysinfo_625 = node 10 size: 129022 MB notes_plat_sysinfo_630 = node 10 free: 128918 MB notes_plat_sysinfo_635 = node 11 cpus: 88 89 90 91 92 93 94 95 notes_plat_sysinfo_640 = node 11 size: 129022 MB notes_plat_sysinfo_645 = node 11 free: 128876 MB notes_plat_sysinfo_650 = node 12 cpus: 96 97 98 99 100 
101 102 103 notes_plat_sysinfo_655 = node 12 size: 128997 MB notes_plat_sysinfo_660 = node 12 free: 128873 MB notes_plat_sysinfo_665 = node 13 cpus: 104 105 106 107 108 109 110 111 notes_plat_sysinfo_670 = node 13 size: 129022 MB notes_plat_sysinfo_675 = node 13 free: 128840 MB notes_plat_sysinfo_680 = node 14 cpus: 112 113 114 115 116 117 118 119 notes_plat_sysinfo_685 = node 14 size: 129022 MB notes_plat_sysinfo_690 = node 14 free: 128781 MB notes_plat_sysinfo_695 = node 15 cpus: 120 121 122 123 124 125 126 127 notes_plat_sysinfo_700 = node 15 size: 129016 MB notes_plat_sysinfo_705 = node 15 free: 128890 MB notes_plat_sysinfo_710 = node distances: notes_plat_sysinfo_715 = node 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 notes_plat_sysinfo_720 = 0: 10 11 11 11 11 11 11 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_725 = 1: 11 10 11 11 11 11 11 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_730 = 2: 11 11 10 11 11 11 11 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_735 = 3: 11 11 11 10 11 11 11 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_740 = 4: 11 11 11 11 10 11 11 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_745 = 5: 11 11 11 11 11 10 11 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_750 = 6: 11 11 11 11 11 11 10 11 32 32 32 32 32 32 32 32 notes_plat_sysinfo_755 = 7: 11 11 11 11 11 11 11 10 32 32 32 32 32 32 32 32 notes_plat_sysinfo_760 = 8: 32 32 32 32 32 32 32 32 10 11 11 11 11 11 11 11 notes_plat_sysinfo_765 = 9: 32 32 32 32 32 32 32 32 11 10 11 11 11 11 11 11 notes_plat_sysinfo_770 = 10: 32 32 32 32 32 32 32 32 11 11 10 11 11 11 11 11 notes_plat_sysinfo_775 = 11: 32 32 32 32 32 32 32 32 11 11 11 10 11 11 11 11 notes_plat_sysinfo_780 = 12: 32 32 32 32 32 32 32 32 11 11 11 11 10 11 11 11 notes_plat_sysinfo_785 = 13: 32 32 32 32 32 32 32 32 11 11 11 11 11 10 11 11 notes_plat_sysinfo_790 = 14: 32 32 32 32 32 32 32 32 11 11 11 11 11 11 10 11 notes_plat_sysinfo_795 = 15: 32 32 32 32 32 32 32 32 11 11 11 11 11 11 11 10 notes_plat_sysinfo_800 = notes_plat_sysinfo_805 = 
From /proc/meminfo notes_plat_sysinfo_810 = MemTotal: 2101211752 kB notes_plat_sysinfo_815 = HugePages_Total: 0 notes_plat_sysinfo_820 = Hugepagesize: 2048 kB notes_plat_sysinfo_825 = notes_plat_sysinfo_830 = /sbin/tuned-adm active notes_plat_sysinfo_835 = Current active profile: throughput-performance notes_plat_sysinfo_840 = notes_plat_sysinfo_845 = /usr/bin/lsb_release -d notes_plat_sysinfo_850 = Ubuntu 20.04.1 LTS notes_plat_sysinfo_855 = notes_plat_sysinfo_860 = From /etc/*release* /etc/*version* notes_plat_sysinfo_865 = debian_version: bullseye/sid notes_plat_sysinfo_870 = os-release: notes_plat_sysinfo_875 = NAME="Ubuntu" notes_plat_sysinfo_880 = VERSION="20.04.1 LTS (Focal Fossa)" notes_plat_sysinfo_885 = ID=ubuntu notes_plat_sysinfo_890 = ID_LIKE=debian notes_plat_sysinfo_895 = PRETTY_NAME="Ubuntu 20.04.1 LTS" notes_plat_sysinfo_900 = VERSION_ID="20.04" notes_plat_sysinfo_905 = HOME_URL="https://www.ubuntu.com/" notes_plat_sysinfo_910 = SUPPORT_URL="https://help.ubuntu.com/" notes_plat_sysinfo_915 = notes_plat_sysinfo_920 = uname -a: notes_plat_sysinfo_925 = Linux dl385g10v2 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 x86_64 notes_plat_sysinfo_930 = x86_64 x86_64 GNU/Linux notes_plat_sysinfo_935 = notes_plat_sysinfo_940 = Kernel self-reported vulnerability status: notes_plat_sysinfo_945 = notes_plat_sysinfo_950 = CVE-2018-12207 (iTLB Multihit): Not affected notes_plat_sysinfo_955 = CVE-2018-3620 (L1 Terminal Fault): Not affected notes_plat_sysinfo_960 = Microarchitectural Data Sampling: Not affected notes_plat_sysinfo_965 = CVE-2017-5754 (Meltdown): Not affected notes_plat_sysinfo_970 = CVE-2018-3639 (Speculative Store Bypass): Mitigation: Speculative Store notes_plat_sysinfo_975 = Bypass disabled via prctl and notes_plat_sysinfo_980 = seccomp notes_plat_sysinfo_985 = CVE-2017-5753 (Spectre variant 1): Mitigation: usercopy/swapgs notes_plat_sysinfo_990 = barriers and __user pointer notes_plat_sysinfo_995 = sanitization 
notes_plat_sysinfo_1000 = CVE-2017-5715 (Spectre variant 2): Mitigation: Full AMD retpoline, notes_plat_sysinfo_1005 = IBPB: conditional, IBRS_FW, STIBP: notes_plat_sysinfo_1010 = disabled, RSB filling notes_plat_sysinfo_1015 = CVE-2020-0543 (Special Register Buffer Data Sampling): Not affected notes_plat_sysinfo_1020 = CVE-2019-11135 (TSX Asynchronous Abort): Not affected notes_plat_sysinfo_1025 = notes_plat_sysinfo_1030 = run-level 5 Apr 1 12:23 notes_plat_sysinfo_1035 = notes_plat_sysinfo_1040 = SPEC is set to: /home/cpu2017_B1 notes_plat_sysinfo_1045 = Filesystem Type Size Used Avail Use% Mounted on notes_plat_sysinfo_1050 = /dev/mapper/ubuntu--vg-ubuntu--lv ext4 182G 52G 121G 30% / notes_plat_sysinfo_1055 = notes_plat_sysinfo_1060 = From /sys/devices/virtual/dmi/id notes_plat_sysinfo_1065 = Vendor: HPE notes_plat_sysinfo_1070 = Product: ProLiant DL385 Gen10 Plus notes_plat_sysinfo_1075 = Product Family: ProLiant notes_plat_sysinfo_1080 = Serial: CN79340HC3 notes_plat_sysinfo_1085 = notes_plat_sysinfo_1090 = Additional information from dmidecode follows. WARNING: Use caution when you interpret notes_plat_sysinfo_1095 = this section. The 'dmidecode' program reads system data which is "intended to allow notes_plat_sysinfo_1100 = hardware to be accurately determined", but the intent may not be met, as there are notes_plat_sysinfo_1105 = frequent changes to hardware, firmware, and the "DMTF SMBIOS" standard. 
notes_plat_sysinfo_1110 = Memory: notes_plat_sysinfo_1115 = 16x UNKNOWN M386AAG40AM3-CWE 128 GB 4 rank 3200 notes_plat_sysinfo_1120 = 16x UNKNOWN NOT AVAILABLE notes_plat_sysinfo_1125 = notes_plat_sysinfo_1130 = BIOS: notes_plat_sysinfo_1135 = BIOS Vendor: HPE notes_plat_sysinfo_1140 = BIOS Version: A42 notes_plat_sysinfo_1145 = BIOS Date: 02/15/2021 notes_plat_sysinfo_1150 = BIOS Revision: 2.40 notes_plat_sysinfo_1155 = Firmware Revision: 2.40 notes_plat_sysinfo_1160 = notes_plat_sysinfo_1165 = (End of data from sysinfo program) hw_cpu_name = AMD EPYC 7713 hw_disk = 1 x 182 GB SATA SSD, RAID 0 hw_memory000 = 2 TB (16 x 128 GB 4Rx4 PC4-3200AA-L) hw_nchips = 2 prepared_by = HPE Performance Engineering sw_file = ext4 sw_os000 = Ubuntu 20.04.1 LTS (x86_64) sw_os001 = Kernel 5.4.0-42-generic sw_state = Run level 5 (multi-user) # End of settings added by sysinfo_program 648.exchange2_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 641.leela_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 631.deepsjeng_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 625.x264_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 623.xalancbmk_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 605.mcf_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 602.gcc_s: # The following setting was inserted automatically as a result of # post-run basepeak application. basepeak = 1 600.perlbench_s: # The following setting was inserted automatically as a result of # post-run basepeak application. 
basepeak = 1 # The following section was added automatically, and contains settings that # did not appear in the original configuration file, but were added to the # raw file after the run. default: power_management000 = BIOS set to prefer performance at the cost of power_management001 = additional power usage notes_plat_000 = BIOS Configuration notes_plat_005 = Workload Profile set to General Peak Frequency Compute notes_plat_010 = AMD SMT Option set to Disabled notes_plat_015 = Determinism Control set to Manual notes_plat_020 = Performance Determinism set to Power Deterministic notes_plat_025 = Last-Level Cache (LLC) as NUMA Node set to Enabled notes_plat_030 = NUMA memory domains per socket set to One memory domain per socket notes_plat_035 = Thermal Configuration set to Maximum Cooling notes_plat_040 = Workload Profile set to Custom notes_plat_045 = Infinity Fabric Power Management set to Disabled notes_plat_050 = Infinity Fabric Performance State set to P0 notes_plat_055 = Power Regulator set to OS Control Mode