# Invocation command line:
# /root/cpu2017-1.0.5/bin/harness/runcpu --configfile amd_speed_aocc200_rome_B1.cfg --tune all --reportable --iterations 3 --nopower --runmode speed --tune base:peak --size test:train:refspeed fpspeed
# output_root was not used for this run
############################################################################
################################################################################
# AMD AOCC 2.0.0 SPEC CPU2017 V1.0.5 Speed Configuration File for 64-bit Linux
#
# File name                : amd_speed_aocc200_rome_B1.cfg
# Creation Date            : August 14, 2019
# CPU2017 Version          : 1.0.5
# Supported benchmarks     : All Speed benchmarks (intspeed, fpspeed)
# Compiler name/version    : AOCC v2.0.0
# Operating system version : Fedora 26
# Supported OS's           : Ubuntu 18.04/19.04, RHEL 8.0, SLES 15 SP1
# Hardware                 : AMD Rome, Naples (AMD64)
# FP Base Pointer Size     : 64-bit
# FP Peak Pointer Size     : 64-bit
# INT Base Pointer Size    : 64-bit
# INT Peak Pointer Size    : 32/64-bit
# Auto Parallelization     : No
#
# Note: DO NOT EDIT THIS FILE; the only edits required to properly run these
# binaries are made in the ini Python file. Please consult
# Readme.amd_speed_aocc200_rome_B1.txt for a few uncommon exceptions which
# require edits to this file.
#
# Description:
#
# This binary package automates away many of the complexities necessary to set
# up and run SPEC CPU2017 under optimized conditions on AMD Rome/Naples-based
# server platforms within Linux (AMD64).
#
# The binary package was built specifically for AMD Rome/Naples microprocessors
# and is not intended to run on other products.
#
# Please install the binary package by following the instructions in
# "Readme.amd_speed_aocc200_rome_B1.txt" under the "How To Use the Binaries"
# section.
#
# The binary package is designed to work without alteration on two-socket AMD
# Rome/Naples-based servers with 64 cores per socket, SMT enabled, and 1 TiB of
# DDR4 memory distributed evenly among all 16 channels using 32 GiB DIMMs.
#
# To run the binary package on other Rome/Naples configurations, please review
# "Readme.amd_speed_aocc200_rome_B1.txt". In general, Rome or Naples CPUs
# should be autodetected with no action required by the user.
#
# In most cases, it should be unnecessary to edit "amd_speed_aocc200_rome_B1.cfg"
# or any other file besides "ini_amd_speed_aocc200_rome_B1.py", where reporting
# fields and run conditions are set.
#
# The run script automatically sets the optimal number of speed copies and binds
# them appropriately.
#
# The run script and accompanying binary package are designed to work on Ubuntu
# 18.04/19.04, RHEL 8.0 and SLES 15 SP1.
#
# Important! If you write your own run script, please set the stack size to
# "unlimited" when executing this binary package. Failure to do so may cause
# some benchmarks to overflow the stack. For example, to set the stack size
# within the bash shell, include the following line somewhere at the top of
# your run script before the runcpu invocation:
#
#   ulimit -s unlimited
#
# Modification of this config file should only be necessary if you intend to
# rebuild the binaries. General instructions for rebuilding the binaries are
# found in-line below.
#
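# As an illustration only (the shipped run scripts already handle this), a
# minimal hand-written run script combining the stack-size guidance above with
# the NUMA interleaving recorded in the OS notes later in this file might look
# like the following sketch:
#
#   #!/bin/bash
#   # Put runcpu on the PATH for this installation:
#   cd /root/cpu2017-1.0.5 && source shrc
#   # Avoid stack overflows in the larger benchmarks:
#   ulimit -s unlimited
#   # Interleave runcpu's own memory across all NUMA nodes and start the run
#   # (options mirror the invocation recorded at the top of this file):
#   numactl --interleave=all runcpu --configfile amd_speed_aocc200_rome_B1.cfg \
#       --tune base:peak --reportable --iterations 3 --nopower --runmode speed \
#       --size test:train:refspeed fpspeed
#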
################################################################################
# Include file name
################################################################################
# The include file contains fields that are commonly changed. This file is
# auto-generated based upon INI file settings and should not need user
# modification for runs.
%define inc_file_name amd_speed_aocc200_rome_B1.inc

#include: %{inc_file_name}
# ----- Begin inclusion of 'amd_speed_aocc200_rome_B1.inc'
############################################################################
################################################################################
################################################################################
# File name: amd_speed_aocc200_rome_B1.inc
# File generation code date: August 12, 2019
# File generation date/time: August 30, 2019 / 02:48:29
#
# This file is automatically generated during a SPEC CPU2017 run.
#
# To modify inc file generation, please consult the readme file or the run
# script.
################################################################################
################################################################################

################################################################################
################################################################################
# The following macros are generated for use in the cfg file.
################################################################################
################################################################################
%define logical_core_count 48
%define physical_core_count 24

################################################################################
# The following macros define the Speed thread counts for the peak benchmarks.
#
# intspeed benchmarks: 600.perlbench_s,602.gcc_s,605.mcf_s,620.omnetpp_s,
#   623.xalancbmk_s,625.x264_s,631.deepsjeng_s,641.leela_s,648.exchange2_s,
#   657.xz_s
# fpspeed benchmarks: 603.bwaves_s,607.cactuBSSN_s,619.lbm_s,621.wrf_s,
#   627.cam4_s,628.pop2_s,638.imagick_s,644.nab_s,649.fotonik3d_s,
#   654.roms_s
#
################################################################################

# default preENV thread settings:
default:
preENV_OMP_THREAD_LIMIT = 48
preENV_GOMP_CPU_AFFINITY = 0-47

################################################################################
################################################################################
# intspeed base thread counts:
intspeed=base:
threads = 24
ENV_GOMP_CPU_AFFINITY = 0-23
bind0 = numactl --physcpubind=0-23
submit = echo "$command" > run.sh ; $BIND bash run.sh

################################################################################
################################################################################
# fpspeed base thread counts:
fpspeed=base:
threads = 24
ENV_GOMP_CPU_AFFINITY = 0-23
bind0 = numactl --physcpubind=0-23
submit = echo "$command" > run.sh ; $BIND bash run.sh

################################################################################
################################################################################
# peak thread counts: 1
600.perlbench_s,602.gcc_s,605.mcf_s,620.omnetpp_s,623.xalancbmk_s,625.x264_s,631.deepsjeng_s,641.leela_s,648.exchange2_s=peak:
threads = 1
ENV_GOMP_CPU_AFFINITY = 0
bind0 = numactl --physcpubind=0
submit = echo "$command" > run.sh ; $BIND bash run.sh

################################################################################
################################################################################
# peak thread counts: 24
603.bwaves_s,607.cactuBSSN_s,627.cam4_s,628.pop2_s,638.imagick_s,649.fotonik3d_s,654.roms_s,657.xz_s=peak:
threads = 24
ENV_GOMP_CPU_AFFINITY = 0-23
bind0 = numactl --physcpubind=0-23
submit = echo "$command" > run.sh ; $BIND bash run.sh
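
# Note (illustration only, not part of the generated inc file): the
# GOMP_CPU_AFFINITY list in the 48-thread peak block below interleaves each
# physical core with its SMT sibling; on this 24-core / 48-thread part, cpu N
# and cpu N+24 share a core (see the lscpu and numactl output in the sysinfo
# notes near the end of this file). The pairing can be confirmed on the SUT
# with, for example:
#
#   cat /sys/devices/system/cpu/cpu0/topology/thread_siblings_list
#   # expected output on this system: 0,24
#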
################################################################################
################################################################################
# peak thread counts: 48
619.lbm_s,621.wrf_s,644.nab_s=peak:
threads = 48
ENV_GOMP_CPU_AFFINITY = 0 24 1 25 2 26 3 27 4 28 5 29 6 30 7 31 8 32 9 33 10 34 11 35 12 36 13 37 14 38 15 39 16 40 17 41 18 42 19 43 20 44 21 45 22 46 23 47
bind0 = numactl --physcpubind=0-47
submit = echo "$command" > run.sh ; $BIND bash run.sh

################################################################################
################################################################################
################################################################################
# Switch back to default:
default:
################################################################################
################################################################################
################################################################################
# The remainder of this file defines CPU2017 report parameters.
################################################################################
################################################################################

################################################################################
# SPEC CPU 2017 report header
################################################################################
license_num  = 55    # (Your SPEC license number)
tester       = Dell Inc.
test_sponsor = Dell Inc.
hw_vendor    = Dell Inc.
hw_model000  = PowerEdge R6515 (AMD EPYC 7402P, 2.80 GHz)

#--------- If you install new compilers, edit this section --------------------
sw_compiler  = C/C++/Fortran: Version 2.0.0 of AOCC

################################################################################
################################################################################
# Hardware, firmware and software information
################################################################################
hw_avail           = Sep-2019
sw_avail           = Aug-2019
hw_cpu_name        = AMD EPYC ROME24
hw_cpu_nominal_mhz = 2800
hw_cpu_max_mhz     = 3350
hw_ncores          = 24
hw_nthreadspercore = 2
hw_ncpuorder       = 1 chip
hw_other           = None    # Other perf-relevant hw, or "None"
fw_bios            = Version 1.0.4 released Aug-2019
sw_base_ptrsize    = 64-bit
hw_pcache          = 32 KB I + 32 KB D on chip per core
hw_scache          = 512 KB I+D on chip per core
hw_tcache000       = 128 MB I+D on chip per chip, 16 MB shared / 3
hw_tcache001       = cores
hw_ocache          = None

################################################################################
# Notes
################################################################################
# Enter notes_000 through notes_100 here.
notes_050 = Binaries were compiled on a system with 2x AMD EPYC 7601 CPU + 512GB Memory using Fedora 26
notes_055 =
notes_060 = NA: The test sponsor attests, as of date of publication, that CVE-2017-5754 (Meltdown)
notes_065 = is mitigated in the system as tested and documented.
notes_070 = Yes: The test sponsor attests, as of date of publication, that CVE-2017-5753 (Spectre variant 1)
notes_075 = is mitigated in the system as tested and documented.
notes_080 = Yes: The test sponsor attests, as of date of publication, that CVE-2017-5715 (Spectre variant 2)
notes_085 = is mitigated in the system as tested and documented.
notes_090 =

notes_submit_000 = 'numactl' was used to bind copies to the cores.
notes_submit_005 = See the configuration file for details.
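
# For illustration only: with the base settings defined earlier in this file
# (bind0 = numactl --physcpubind=0-23), the submit line launches each benchmark
# roughly like the following shell commands; $command is filled in by the
# harness and run.sh is the wrapper named in the submit definition:
#
#   echo "$command" > run.sh
#   numactl --physcpubind=0-23 bash run.sh
#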
notes_os_000 = 'ulimit -s unlimited' was used to set environment stack size
notes_os_005 = 'ulimit -l 2097152' was used to set environment locked pages in memory limit
notes_os_010 =
notes_os_015 = runcpu command invoked through numactl i.e.:
notes_os_020 = numactl --interleave=all runcpu
notes_os_025 =
notes_os_030 = Set dirty_ratio=8 to limit dirty cache to 8% of memory
notes_os_035 = Set swappiness=1 to swap only if necessary
notes_os_040 = Set zone_reclaim_mode=1 to free local node memory and avoid remote memory
notes_os_045 = sync then drop_caches=3 to reset caches before invoking runcpu
notes_os_050 =
notes_os_055 = dirty_ratio, swappiness, zone_reclaim_mode and drop_caches were
notes_os_060 = all set using privileged echo (e.g. echo 1 > /proc/sys/vm/swappiness).
notes_os_065 =
notes_os_070 = Transparent huge pages set to 'always' for this run (OS default)

notes_comp_000 = The AMD64 AOCC Compiler Suite is available at
notes_comp_005 = http://developer.amd.com/amd-aocc/
notes_comp_010 =

notes_jemalloc_000 = jemalloc: configured and built with GCC v9.1.0 in Ubuntu 19.04 with -O3 -znver2 -flto
notes_jemalloc_005 = jemalloc 5.1.0 is available here:
notes_jemalloc_010 = https://github.com/jemalloc/jemalloc/releases/download/5.1.0/jemalloc-5.1.0.tar.bz2
notes_jemalloc_015 =

sw_other = jemalloc: jemalloc memory allocator library v5.1.0

################################################################################
# The following note fields describe platform settings.
################################################################################
# example: (uncomment as necessary)
notes_plat_000 = BIOS settings:
notes_plat_005 =   NUMA Nodes Per Socket set to 4
notes_plat_010 =   CCX as NUMA Domain set to Enabled
notes_plat_015 =   System Profile set to Custom
notes_plat_020 =   CPU Power Management set to Maximum Performance
notes_plat_025 =   Memory Frequency set to Maximum Performance
notes_plat_030 =   Turbo Boost Enabled
notes_plat_035 =   C states set to Enabled
notes_plat_040 =   Memory Patrol Scrub Disabled
notes_plat_045 =   Memory Refresh Rate set to 1x
notes_plat_050 =   PCI ASPM L1 Link Power Management Disabled
notes_plat_055 =   Determinism Slider set to Power Determinism
notes_plat_060 =   Efficiency Optimized Mode Disabled

################################################################################
# The following are custom fields:
################################################################################
# Use custom_fields to enter lines that are not listed here. For example:
# notes_plat_100 = Energy Bias set to Max Performance
# new_field = Ambient temperature set to 10C

################################################################################
# The following fields must be set here for only Int benchmarks.
################################################################################
intspeed:
sw_peak_ptrsize = 32/64-bit

################################################################################
# The following fields must be set here for FP benchmarks.
################################################################################
fpspeed:
sw_peak_ptrsize = 64-bit

################################################################################
# The following fields must be set here or they will be overwritten by sysinfo.
################################################################################
intspeed,fpspeed:
hw_disk      = 1 x 960 GB SATA SSD
hw_memory000 = 256 GB (8 x 32 GB 2Rx4 PC4-3200AA-R,
hw_memory001 = running at 3200)
hw_memory002 =
hw_nchips    = 1
prepared_by  = Dell Inc.
sw_file  = xfs
sw_os000 = SUSE Linux Enterprise Server 15 SP1
sw_os001 = kernel 4.12.14-195-default
sw_state = Run level 3 (multi-user)

################################################################################
# End of inc file
################################################################################

# Switch back to the default block after the include file:
default:
# ---- End inclusion of '/root/cpu2017-1.0.5/config/amd_speed_aocc200_rome_B1.inc'

# Switch back to default block after the include file:
default:

################################################################################
# Binary label extension and "allow_build" switch
################################################################################
# Only modify the binary label extension if you plan to rebuild the binaries.
%define ext amd_speed_aocc200_rome_B

# If you plan to recompile these CPU2017 binaries, please choose a new extension
# name (ext above) to avoid confusion with the current binary set on your system
# under test, and to avoid confusion for SPEC submission reviewers. You will
# also need to set "allow_build" to true below. Finally, you must modify the
# Paths section below to point to your library locations if the paths are not
# already set up in your build environment.

# Change the following line to true if you intend to REBUILD the binaries (AMD
# does not support this). Valid values are "true" or "false" (no quotes).
%define allow_build false

# Allow environment variables to be set before runs:
preenv = 1

# Necessary to avoid out-of-memory exceptions on certain SUTs:
preENV_MALLOC_CONF = retain:true

################################################################################
# Paths -- MODIFY AS NEEDED (modification should not be necessary for runs)
################################################################################
# Set location of runtime libraries for runs or builds.

# Define the name of the directory that holds AMD library files:
%define lib_dir amd_speed_aocc200_rome_B_lib

# Define 32-bit library paths:
# Do not use $[top] because this will cause a flag checksum error triggering a
# xalanc recompile attempt on other SUTs:
JEMALLOC_LIB32_PATH = /sppo/dev/cpu2017/amd_speed_aocc200_rome/%{lib_dir}/32
OMP_LIB32_PATH      = /sppo/dev/cpu2017/amd_speed_aocc200_rome/%{lib_dir}/32

%if '%{allow_build}' eq 'false'
fail_build = 1
# Runtime libraries:
preENV_LD_LIBRARY_PATH = $[top]/%{lib_dir}/64;$[top]/%{lib_dir}/32:%{ENV_LD_LIBRARY_PATH}
preENV_OMP_STACKSIZE = 128M
preENV_OMP_SCHEDULE = static
preENV_OMP_DYNAMIC = false
%elif '%{allow_build}' eq 'true'
# If you intend to rebuild, be sure to set the library paths either in the
# build script or here:
%define build_ncpus 64    # controls number of simultaneous compiles
fail_build = 0
makeflags = --jobs=%{build_ncpus} --load-average=%{build_ncpus}
%else
%error The value of "allow_build" is %{allow_build}, but it can only be "true" or "false".
%endif

################################################################################
# Enable automated data collection per benchmark
################################################################################
# Data collection is not enabled for reportable runs.
# teeout is necessary to get data collection stdout into the logs. Best
# practices for the individual data collection items would be to have
# them store important output in separate files.
# Filenames could be constructed from $SPEC (environment), $lognum (result
# number from runcpu), and benchmark name/number.
teeout = yes

# Run runcpu with '-v 35' (or greater) to log lists of variables which can
# be used in substitutions as below.
# For CPU2006, change $label to $ext
%define data-collection-parameters benchname='$name' benchnum='$num' benchmark='$benchmark' iteration=$iter size='$size' tune='$tune' label='$label' log='$log' lognum='$lognum' from_runcpu='$from_runcpu'
%define data-collection-start $[top]/data-collection/data-collection start %{data-collection-parameters}
%define data-collection-stop $[top]/data-collection/data-collection stop %{data-collection-parameters}
monitor_specrun_wrapper = %{data-collection-start} ; $command ; %{data-collection-stop}

################################################################################
# Header settings
################################################################################
backup_config = 0    # set to 0 if you do not want backup files
bench_post_setup = sync

# command_add_redirect: If set, the generated ${command} will include
# redirection operators (stdout, stderr), which are passed along to the shell
# that executes the command. If this variable is not set, specinvoke does the
# redirection.
command_add_redirect = yes
env_vars = yes
flagsurl000 = http://www.spec.org/cpu2017/flags/aocc200-flags-C1-Dell.xml
flagsurl001 = http://www.spec.org/cpu2017/flags/Dell-Platform-Flags-PowerEdge-revE4.xml
#flagsurl02 = $[top]/INVALID-platform-amd_speed_aocc200_revB-I.xml

# label: User defined extension string that tags your binaries & directories:
label = %{ext}
line_width = 1020
log_line_width = 1020
mean_anyway = yes
output_format = all
reportable = yes
size = test,train,ref
teeout = yes
teerunout = yes
tune = base,peak
use_submit_for_speed = yes

################################################################################
# Compilers
################################################################################
default:
CC  = clang
CXX = clang++
FC  = flang
CLD = clang
FLD = flang
CC_VERSION_OPTION  = --version
CXX_VERSION_OPTION = --version
FC_VERSION_OPTION  = --version

default:    # data model applies to all benchmarks
################################################################################
# Default Flags
################################################################################
EXTRA_PORTABILITY = -DSPEC_LP64
EXTRA_LIBS = -fopenmp=libomp -lomp -ljemalloc -lamdlibm -lm
MATHLIBOPT =

################################################################################
# Portability Flags
################################################################################
default:
# *** Benchmark-specific portability ***
# Anything other than the data model is only allowed where a need is proven.
# (ordered by last 2 digits of benchmark number)

600.perlbench_s:    #lang='C'
PORTABILITY = -DSPEC_LINUX_X64

621.wrf_s:    #lang='F,C'
CPORTABILITY = -DSPEC_CASE_FLAG
FPORTABILITY = -Mbyteswapio

623.xalancbmk_s:    #lang='CXX'
PORTABILITY = -DSPEC_LINUX

627.cam4_s:    #lang='F,C'
PORTABILITY = -DSPEC_CASE_FLAG

628.pop2_s:    #lang='F,C'
CPORTABILITY = -DSPEC_CASE_FLAG
FPORTABILITY = -Mbyteswapio

################################################################################
# Tuning Flags
################################################################################

#####################
# Base tuning flags #
#####################
default=base:
#optimize flags
COPTIMIZE   = -O3 -flto -ffast-math -march=znver2 -fstruct-layout=3 \
              -mllvm -unroll-threshold=50 -fremap-arrays \
              -mllvm -function-specialize -mllvm -enable-gvn-hoist \
              -mllvm -reduce-array-computations=3 -mllvm -global-vectorize-slp \
              -mllvm -vector-library=LIBMVEC \
              -mllvm -inline-threshold=1000 -flv-function-specialization
CXXOPTIMIZE = -O3 -flto -ffast-math -march=znver2 \
              -mllvm -loop-unswitch-threshold=200000 \
              -mllvm -vector-library=LIBMVEC \
              -mllvm -unroll-threshold=100 -flv-function-specialization \
              -mllvm -enable-partial-unswitch
FOPTIMIZE   = -O3 -flto -march=znver2 -funroll-loops -Mrecursive \
              -mllvm -vector-library=LIBMVEC
EXTRA_FFLAGS = -Kieee -fno-finite-math-only

#linker flags
LDFLAGS     = -flto -Wl,-mllvm -Wl,-function-specialize \
              -Wl,-mllvm -Wl,-region-vectorize \
              -Wl,-mllvm -Wl,-vector-library=LIBMVEC \
              -Wl,-mllvm -Wl,-reduce-array-computations=3
LDCXXFLAGS  = -Wl,-mllvm -Wl,-suppress-fmas

#other libraries
# Put OpenMP and math libraries here:
EXTRA_LIBS  = -fopenmp=libomp -lomp -lpthread -ldl -lmvec -lamdlibm -ljemalloc -lflang -lm

# Don't put the AMD and mvec math libraries in MATHLIBOPT: doing so triggers a
# reporting issue because GCC won't use them. Instead, force-feed all benchmarks
# the math libraries via EXTRA_LIBS and clear out MATHLIBOPT.
MATHLIBOPT =

# The following is necessary for 502/602 gcc:
EXTRA_OPTIMIZE = -DSPEC_OPENMP -fopenmp -Wno-return-type -DUSE_OPENMP

# The following is necessary for 502/602 gcc:
LDOPTIMIZE = -z muldefs

#########################
# intspeed tuning flags #
#########################
intspeed:
EXTRA_FFLAGS = -ffast-math \
               -mllvm -disable-indvar-simplify \
               -mllvm -unroll-aggressive \
               -mllvm -unroll-threshold=150
LDFFLAGS     = -ffast-math \
               -Wl,-mllvm -Wl,-inline-recursion=4 \
               -Wl,-mllvm -Wl,-lsr-in-nested-loop \
               -Wl,-mllvm -Wl,-enable-iv-split

########################
# fpspeed tuning flags #
########################
fpspeed:
CXX = clang++ -std=c++98

#####################
# Peak tuning flags #
#####################
default=peak:
#optimize flags
COPTIMIZE   = -Ofast -flto -march=znver2 -mno-sse4a -fstruct-layout=5 \
              -mllvm -vectorize-memory-aggressively \
              -mllvm -function-specialize -mllvm -enable-gvn-hoist \
              -mllvm -unroll-threshold=50 -fremap-arrays \
              -mllvm -vector-library=LIBMVEC \
              -mllvm -reduce-array-computations=3 -mllvm -global-vectorize-slp \
              -mllvm -inline-threshold=1000 -flv-function-specialization
CXXOPTIMIZE = -Ofast -flto -march=znver2 -flv-function-specialization \
              -mllvm -unroll-threshold=100 -mllvm -enable-partial-unswitch \
              -mllvm -loop-unswitch-threshold=200000 \
              -mllvm -vector-library=LIBMVEC \
              -mllvm -inline-threshold=1000
FOPTIMIZE   = -O3 -flto -march=znver2 -funroll-loops -Mrecursive \
              -mllvm -vector-library=LIBMVEC
EXTRA_FFLAGS = -Kieee -fno-finite-math-only

#linker flags
LDFLAGS     = -flto -Wl,-mllvm -Wl,-function-specialize \
              -Wl,-mllvm -Wl,-region-vectorize \
              -Wl,-mllvm -Wl,-vector-library=LIBMVEC \
              -Wl,-mllvm -Wl,-reduce-array-computations=3

#libraries
EXTRA_LIBS     = -fopenmp=libomp -lomp -lpthread -ldl -lmvec -lamdlibm -ljemalloc -lflang -lm
EXTRA_OPTIMIZE = -DSPEC_OPENMP -fopenmp -Wno-return-type -DUSE_OPENMP
EXTRA_FLIBS    = -lmvec -lamdlibm -lm
MATHLIBOPT     = -lmvec -lamdlibm -lm

feedback = 0
PASS1_CFLAGS   = -fprofile-instr-generate
PASS2_CFLAGS   = -fprofile-instr-use
PASS1_FFLAGS   = -fprofile-generate
PASS2_FFLAGS   = -fprofile-use
PASS1_CXXFLAGS = -fprofile-instr-generate
PASS2_CXXFLAGS = -fprofile-instr-use
PASS1_LDFLAGS  = -fprofile-instr-generate
PASS2_LDFLAGS  = -fprofile-instr-use
fdo_run1 = $command ; llvm-profdata merge -output=default.profdata *.profraw

########################################
# Benchmark specific peak tuning flags #
########################################
600.perlbench_s=peak:    #lang='C'
feedback = 1

602.gcc_s=peak:    #lang='C'
EXTRA_COPTIMIZE = -fgnu89-inline
LDOPTIMIZE = -z muldefs
EXTRA_LIBS = -fopenmp=libomp -lomp -lpthread -ldl -lm -ljemalloc
MATHLIBOPT = -lm

623.xalancbmk_s=peak:    #lang='CXX'
EXTRA_PORTABILITY = -D_FILE_OFFSET_BITS=64
CXX   = clang++ -m32
CXXLD = clang++ -m32
EXTRA_LIBS = -L$[OMP_LIB32_PATH] -fopenmp=libomp -L$[OMP_LIB32_PATH] -lomp -lpthread -ldl -L$[JEMALLOC_LIB32_PATH] -ljemalloc
MATHLIBOPT = -lm
ENV_OMP_STACKSIZE = 128M

625.x264_s=peak:    #lang='C'
feedback = 1

654.roms_s=peak:
LDFFLAGS = -Wl,-mllvm -Wl,-enable-X86-prefetching

# The following settings were obtained by running the sysinfo_program
# 'specperl $[top]/bin/sysinfo' (sysinfo:SHA:32259ebd59f3e93740723202f27c44c82ee68e0f2e40cd2ca50cfd5519772397)
default:
notes_plat_sysinfo_000 = Sysinfo program /root/cpu2017-1.0.5/bin/sysinfo
notes_plat_sysinfo_005 = Rev: r5974 of 2018-05-19 9bcde8f2999c33d61f64985e45859ea9
notes_plat_sysinfo_010 = running on linux-g3ob Fri Aug 30 08:00:18 2019
notes_plat_sysinfo_015 =
notes_plat_sysinfo_020 = SUT (System Under Test) info as seen by some common utilities.
notes_plat_sysinfo_025 = For more information on this section, see
notes_plat_sysinfo_030 = https://www.spec.org/cpu2017/Docs/config.html#sysinfo
notes_plat_sysinfo_035 =
notes_plat_sysinfo_040 = From /proc/cpuinfo
notes_plat_sysinfo_045 = model name : AMD EPYC 7402P 24-Core Processor
notes_plat_sysinfo_050 = 1 "physical id"s (chips)
notes_plat_sysinfo_055 = 48 "processors"
notes_plat_sysinfo_060 = cores, siblings (Caution: counting these is hw and system dependent. The following
notes_plat_sysinfo_065 = excerpts from /proc/cpuinfo might not be reliable. Use with caution.)
notes_plat_sysinfo_070 = cpu cores : 24
notes_plat_sysinfo_075 = siblings : 48
notes_plat_sysinfo_080 = physical 0: cores 0 1 2 4 5 6 8 9 10 12 13 14 16 17 18 20 21 22 24 25 26 28 29 30
notes_plat_sysinfo_085 =
notes_plat_sysinfo_090 = From lscpu:
notes_plat_sysinfo_095 = Architecture:        x86_64
notes_plat_sysinfo_100 = CPU op-mode(s):      32-bit, 64-bit
notes_plat_sysinfo_105 = Byte Order:          Little Endian
notes_plat_sysinfo_110 = Address sizes:       43 bits physical, 48 bits virtual
notes_plat_sysinfo_115 = CPU(s):              48
notes_plat_sysinfo_120 = On-line CPU(s) list: 0-47
notes_plat_sysinfo_125 = Thread(s) per core:  2
notes_plat_sysinfo_130 = Core(s) per socket:  24
notes_plat_sysinfo_135 = Socket(s):           1
notes_plat_sysinfo_140 = NUMA node(s):        8
notes_plat_sysinfo_145 = Vendor ID:           AuthenticAMD
notes_plat_sysinfo_150 = CPU family:          23
notes_plat_sysinfo_155 = Model:               49
notes_plat_sysinfo_160 = Model name:          AMD EPYC 7402P 24-Core Processor
notes_plat_sysinfo_165 = Stepping:            0
notes_plat_sysinfo_170 = CPU MHz:             2794.434
notes_plat_sysinfo_175 = BogoMIPS:            5588.86
notes_plat_sysinfo_180 = Virtualization:      AMD-V
notes_plat_sysinfo_185 = L1d cache:           32K
notes_plat_sysinfo_190 = L1i cache:           32K
notes_plat_sysinfo_195 = L2 cache:            512K
notes_plat_sysinfo_200 = L3 cache:            16384K
notes_plat_sysinfo_205 = NUMA node0 CPU(s):   0-2,24-26
notes_plat_sysinfo_210 = NUMA node1 CPU(s):   3-5,27-29
notes_plat_sysinfo_215 = NUMA node2 CPU(s):   6-8,30-32
notes_plat_sysinfo_220 = NUMA node3 CPU(s):   9-11,33-35
notes_plat_sysinfo_225 = NUMA node4 CPU(s):   12-14,36-38
notes_plat_sysinfo_230 = NUMA node5 CPU(s):   15-17,39-41
notes_plat_sysinfo_235 = NUMA node6 CPU(s):   18-20,42-44
notes_plat_sysinfo_240 = NUMA node7 CPU(s):   21-23,45-47
notes_plat_sysinfo_245 = Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov
notes_plat_sysinfo_250 = pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm
notes_plat_sysinfo_255 = constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid aperfmperf pni
notes_plat_sysinfo_260 = pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c
notes_plat_sysinfo_265 = rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch
notes_plat_sysinfo_270 = osvw ibs skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_l2 mwaitx cpb
notes_plat_sysinfo_275 = cat_l3 cdp_l3 hw_pstate sme ssbd sev ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep
notes_plat_sysinfo_280 = bmi2 cqm rdt_a rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 xsaves
notes_plat_sysinfo_285 = cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local clzero irperf xsaveerptr arat npt
notes_plat_sysinfo_290 = lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter
notes_plat_sysinfo_295 = pfthreshold avic v_vmsave_vmload vgif umip rdpid overflow_recov succor smca
notes_plat_sysinfo_300 =
notes_plat_sysinfo_305 = /proc/cpuinfo cache data
notes_plat_sysinfo_310 = cache size : 512 KB
notes_plat_sysinfo_315 =
notes_plat_sysinfo_320 = From numactl --hardware  WARNING: a numactl 'node' might or might not correspond to a
notes_plat_sysinfo_325 = physical chip.
notes_plat_sysinfo_330 = available: 8 nodes (0-7)
notes_plat_sysinfo_335 = node 0 cpus: 0 1 2 24 25 26
notes_plat_sysinfo_340 = node 0 size: 31692 MB
notes_plat_sysinfo_345 = node 0 free: 31459 MB
notes_plat_sysinfo_350 = node 1 cpus: 3 4 5 27 28 29
notes_plat_sysinfo_355 = node 1 size: 32254 MB
notes_plat_sysinfo_360 = node 1 free: 32183 MB
notes_plat_sysinfo_365 = node 2 cpus: 6 7 8 30 31 32
notes_plat_sysinfo_370 = node 2 size: 32225 MB
notes_plat_sysinfo_375 = node 2 free: 32147 MB
notes_plat_sysinfo_380 = node 3 cpus: 9 10 11 33 34 35
notes_plat_sysinfo_385 = node 3 size: 32254 MB
notes_plat_sysinfo_390 = node 3 free: 32172 MB
notes_plat_sysinfo_395 = node 4 cpus: 12 13 14 36 37 38
notes_plat_sysinfo_400 = node 4 size: 32254 MB
notes_plat_sysinfo_405 = node 4 free: 32177 MB
notes_plat_sysinfo_410 = node 5 cpus: 15 16 17 39 40 41
notes_plat_sysinfo_415 = node 5 size: 32254 MB
notes_plat_sysinfo_420 = node 5 free: 32185 MB
notes_plat_sysinfo_425 = node 6 cpus: 18 19 20 42 43 44
notes_plat_sysinfo_430 = node 6 size: 32254 MB
notes_plat_sysinfo_435 = node 6 free: 32187 MB
notes_plat_sysinfo_440 = node 7 cpus: 21 22 23 45 46 47
notes_plat_sysinfo_445 = node 7 size: 32241 MB
notes_plat_sysinfo_450 = node 7 free: 32112 MB
notes_plat_sysinfo_455 = node distances:
notes_plat_sysinfo_460 = node   0   1   2   3   4   5   6   7
notes_plat_sysinfo_465 =   0:  10  11  12  12  12  12  12  12
notes_plat_sysinfo_470 =   1:  11  10  12  12  12  12  12  12
notes_plat_sysinfo_475 =   2:  12  12  10  11  12  12  12  12
notes_plat_sysinfo_480 =   3:  12  12  11  10  12  12  12  12
notes_plat_sysinfo_485 =   4:  12  12  12  12  10  11  12  12
notes_plat_sysinfo_490 =   5:  12  12  12  12  11  10  12  12
notes_plat_sysinfo_495 =   6:  12  12  12  12  12  12  10  11
notes_plat_sysinfo_500 =   7:  12  12  12  12  12  12  11  10
notes_plat_sysinfo_505 =
notes_plat_sysinfo_510 = From /proc/meminfo
notes_plat_sysinfo_515 = MemTotal:        263609312 kB
notes_plat_sysinfo_520 = HugePages_Total: 0
notes_plat_sysinfo_525 = Hugepagesize:    2048 kB
notes_plat_sysinfo_530 =
notes_plat_sysinfo_535 = From /etc/*release* /etc/*version*
notes_plat_sysinfo_540 = os-release:
notes_plat_sysinfo_545 = NAME="SLES"
notes_plat_sysinfo_550 = VERSION="15-SP1"
notes_plat_sysinfo_555 = VERSION_ID="15.1"
notes_plat_sysinfo_560 = PRETTY_NAME="SUSE Linux Enterprise Server 15 SP1"
notes_plat_sysinfo_565 = ID="sles"
notes_plat_sysinfo_570 = ID_LIKE="suse"
notes_plat_sysinfo_575 = ANSI_COLOR="0;32"
notes_plat_sysinfo_580 = CPE_NAME="cpe:/o:suse:sles:15:sp1"
notes_plat_sysinfo_585 =
notes_plat_sysinfo_590 = uname -a:
notes_plat_sysinfo_595 = Linux linux-g3ob 4.12.14-195-default #1 SMP Tue May 7 10:55:11 UTC 2019 (8fba516)
notes_plat_sysinfo_600 = x86_64 x86_64 x86_64 GNU/Linux
notes_plat_sysinfo_605 =
notes_plat_sysinfo_610 = Kernel self-reported vulnerability status:
notes_plat_sysinfo_615 =
notes_plat_sysinfo_620 = CVE-2017-5754 (Meltdown): Not affected
notes_plat_sysinfo_625 = CVE-2017-5753 (Spectre variant 1): Mitigation: __user pointer sanitization
notes_plat_sysinfo_630 = CVE-2017-5715 (Spectre variant 2): Mitigation: Full AMD retpoline, IBPB: conditional,
notes_plat_sysinfo_635 = IBRS_FW, STIBP: conditional, RSB filling
notes_plat_sysinfo_640 =
notes_plat_sysinfo_645 = run-level 3 Aug 29 12:08 last=5
notes_plat_sysinfo_650 =
notes_plat_sysinfo_655 = SPEC is set to: /root/cpu2017-1.0.5
notes_plat_sysinfo_660 = Filesystem   Type  Size  Used Avail Use% Mounted on
notes_plat_sysinfo_665 = /dev/sda2    xfs   440G   39G  402G   9% /
notes_plat_sysinfo_670 =
notes_plat_sysinfo_675 = Additional information from dmidecode follows. WARNING: Use caution when you interpret
notes_plat_sysinfo_680 = this section. The 'dmidecode' program reads system data which is "intended to allow
notes_plat_sysinfo_685 = hardware to be accurately determined", but the intent may not be met, as there are
notes_plat_sysinfo_690 = frequent changes to hardware, firmware, and the "DMTF SMBIOS" standard.
notes_plat_sysinfo_695 = BIOS Dell Inc. 1.0.4 08/26/2019
notes_plat_sysinfo_700 = Memory:
notes_plat_sysinfo_705 = 8x 802C8632802C 36ASF4G72PZ-3G2E2 32 GB 2 rank 3200
notes_plat_sysinfo_710 = 8x Not Specified Not Specified
notes_plat_sysinfo_715 =
notes_plat_sysinfo_720 = (End of data from sysinfo program)

hw_cpu_name  = AMD EPYC 7402P
hw_disk      = 440 GB add more disk info here
hw_memory001 = 251.397 GB fixme: If using DDR4, the format is:
hw_memory002 = 'N GB (N x N GB nRxn PC4-nnnnX-X)'
hw_nchips    = 1
prepared_by  = root (is never output, only tags rawfile)
sw_file      = xfs
sw_os001     = NAME="SLES"
sw_os002     = 4.12.14-195-default
sw_state     = Run level 3 (add definition here)
# End of settings added by sysinfo_program

# The following section was added automatically, and contains settings that
# did not appear in the original configuration file, but were added to the
# raw file after the run.

638.imagick_s:
# The following setting was inserted automatically as a result of
# post-run basepeak application.
basepeak = 1

628.pop2_s:
# The following setting was inserted automatically as a result of
# post-run basepeak application.
basepeak = 1

621.wrf_s:
# The following setting was inserted automatically as a result of
# post-run basepeak application.
basepeak = 1

619.lbm_s:
# The following setting was inserted automatically as a result of
# post-run basepeak application.
basepeak = 1

603.bwaves_s:
# The following setting was inserted automatically as a result of
# post-run basepeak application.
basepeak = 1

default:
notes_000 = Environment variables set by runcpu before the start of the run:
notes_005 = GOMP_CPU_AFFINITY = "0-47"
notes_010 = LD_LIBRARY_PATH = "/root/cpu2017-1.0.5/amd_speed_aocc200_rome_B_lib/64;
notes_015 = /root/cpu2017-1.0.5/amd_speed_aocc200_rome_B_lib/32"
notes_020 = MALLOC_CONF = "retain:true"
notes_025 = OMP_DYNAMIC = "false"
notes_030 = OMP_SCHEDULE = "static"
notes_035 = OMP_STACKSIZE = "128M"
notes_040 = OMP_THREAD_LIMIT = "48"
notes_045 =