
Commit

Auto-merge updates from auto-update branch
mlcommons-bot committed Jan 1, 2025
2 parents fc3a92d + e3d48b1 commit 4a9d3d4
Showing 30 changed files with 4,780 additions and 996 deletions.
@@ -1,9 +1,9 @@
{
"accelerator_frequency": "2520000 MHz",
"accelerator_frequency": "2610000 MHz",
"accelerator_host_interconnect": "N/A",
"accelerator_interconnect": "N/A",
"accelerator_interconnect_topology": "",
"accelerator_memory_capacity": "23.64971923828125 GB",
"accelerator_memory_capacity": "23.54595947265625 GB",
"accelerator_memory_configuration": "N/A",
"accelerator_model_name": "NVIDIA GeForce RTX 4090",
"accelerator_on-chip_memories": "",
@@ -16,17 +16,17 @@
"host_network_card_count": "1",
"host_networking": "Gig Ethernet",
"host_networking_topology": "N/A",
"host_processor_caches": "L1d cache: 576 KiB, L1i cache: 384 KiB, L2 cache: 24 MiB, L3 cache: ",
"host_processor_core_count": "24",
"host_processor_frequency": "5800.0000",
"host_processor_caches": "L1d cache: 512 KiB, L1i cache: 512 KiB, L2 cache: 16 MiB, L3 cache: 64 MiB",
"host_processor_core_count": "16",
"host_processor_frequency": "5881.0000",
"host_processor_interconnect": "",
"host_processor_model_name": "13th Gen Intel(R) Core(TM) i9-13900K",
"host_processor_model_name": "AMD Ryzen 9 7950X 16-Core Processor",
"host_processors_per_node": "1",
"host_storage_capacity": "9.4T",
"host_storage_capacity": "6.8T",
"host_storage_type": "SSD",
"hw_notes": "",
"number_of_nodes": "1",
"operating_system": "Ubuntu 20.04 (linux-6.8.0-49-generic-glibc2.31)",
"operating_system": "Ubuntu 20.04 (linux-6.8.0-51-generic-glibc2.31)",
"other_software_stack": "Python: 3.8.10, GCC-9.4.0, Using Docker , CUDA 12.2",
"status": "available",
"submitter": "MLCommons",
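*Aside: the hunks above record a host-platform swap (Intel Core i9-13900K to AMD Ryzen 9 7950X) alongside refreshed accelerator metadata. A minimal Python sketch along these lines can surface such field-level changes between two system-description JSONs; the file names are hypothetical placeholders, not paths from this commit:*

```python
import json

def diff_system_descriptions(old_path: str, new_path: str) -> None:
    """Print every top-level field whose value differs between two files."""
    with open(old_path) as f:
        old = json.load(f)
    with open(new_path) as f:
        new = json.load(f)
    # Union of keys so fields added or dropped on either side also show up.
    for key in sorted(old.keys() | new.keys()):
        if old.get(key) != new.get(key):
            print(f"{key}: {old.get(key)!r} -> {new.get(key)!r}")

# Hypothetical file names for illustration only.
diff_system_descriptions("system_meta_old.json", "system_meta_new.json")
```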
@@ -1,5 +1,3 @@
- This experiment is generated using the [MLCommons Collective Mind automation framework (CM)](https://github.com/mlcommons/cm4mlops).
-
*Check [CM MLPerf docs](https://docs.mlcommons.org/inference) for more details.*

## Host platform
@@ -19,7 +17,7 @@ pip install -U cmind

cm rm cache -f

- cm pull repo mlcommons@mlperf-automations --checkout=c52956b27fa8d06ec8db53f885e1f05021e379e9
+ cm pull repo mlcommons@mlperf-automations --checkout=48ea6b46a7606d1c5d74909e94d5599dbe7ff9e1

cm run script \
--tags=app,mlperf,inference,generic,_nvidia,_bert-99.9,_tensorrt,_cuda,_valid,_r4.1-dev_default,_offline \
@@ -41,8 +39,8 @@ cm run script \
--env.CM_RUN_MLPERF_SUBMISSION_PREPROCESSOR=yes \
--env.CM_MLPERF_INFERENCE_PULL_CODE_CHANGES=yes \
--env.CM_MLPERF_INFERENCE_PULL_SRC_CHANGES=yes \
- --env.OUTPUT_BASE_DIR=/home/arjun/gh_action_results \
- --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR=/home/arjun/gh_action_submissions \
+ --env.OUTPUT_BASE_DIR=/cm-mount/home/arjun/gh_action_results \
+ --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR=/cm-mount/home/arjun/gh_action_submissions \
--env.CM_MLPERF_SUBMITTER=MLCommons \
--env.CM_USE_DATASET_FROM_HOST=yes \
--env.CM_USE_MODEL_FROM_HOST=yes \
@@ -71,7 +69,7 @@ cm run script \
--env.CM_DOCKER_REUSE_EXISTING_CONTAINER=yes \
--env.CM_DOCKER_DETACHED_MODE=yes \
--env.CM_MLPERF_INFERENCE_RESULTS_DIR_=/home/arjun/gh_action_results/valid_results \
- --env.CM_DOCKER_CONTAINER_ID=3f9407b7d53a \
+ --env.CM_DOCKER_CONTAINER_ID=a29220344570 \
--env.CM_MLPERF_LOADGEN_COMPLIANCE_TEST=TEST01 \
--add_deps_recursive.compiler.tags=gcc \
--add_deps_recursive.coco2014-original.tags=_full \
@@ -104,10 +102,7 @@ cm run script \
--v=False \
--print_env=False \
--print_deps=False \
- --dump_version_info=True \
- --env.OUTPUT_BASE_DIR=/cm-mount/home/arjun/gh_action_results \
- --env.CM_MLPERF_INFERENCE_SUBMISSION_DIR=/cm-mount/home/arjun/gh_action_submissions \
- --env.MLPERF_SCRATCH_PATH=/home/cmuser/CM/repos/local/cache/a8c152aef5494496
+ --dump_version_info=True
```
*Note that if you want to use the [latest automation recipes](https://docs.mlcommons.org/inference) for MLPerf (CM scripts),
you should simply reload mlcommons@mlperf-automations without checkout and clean CM cache as follows:*
@@ -126,7 +121,7 @@ Platform: RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config
Model Precision: fp16

### Accuracy Results
- `F1`: `90.88067`, Required accuracy for closed division `>= 90.78313`
+ `F1`: `90.88324`, Required accuracy for closed division `>= 90.78313`

### Performance Results
- `Samples per second`: `1597.77`
+ `Samples per second`: `1176.59`
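*For context, the required closed-division value above is 99.9% of the MLPerf FP32 reference F1 for BERT (90.874). A minimal sketch of that check, using the measured value from this run:*

```python
# bert-99.9 closed division: the run must reach 99.9% of the FP32 reference F1.
REFERENCE_F1 = 90.874   # MLPerf reference accuracy for BERT (FP32)
TARGET_RATIO = 0.999    # the "99.9" in bert-99.9

threshold = REFERENCE_F1 * TARGET_RATIO   # 90.783126 -> reported as 90.78313
measured_f1 = 90.88324                    # F1 from the accuracy run above

assert measured_f1 >= threshold, "accuracy run misses closed-division target"
print(f"F1 {measured_f1} >= {threshold:.5f}: PASS")
```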
@@ -1,7 +1,7 @@
- [2024-12-28 15:24:37,200 main.py:229 INFO] Detected system ID: KnownSystem.RTX4090x1
- [2024-12-28 15:24:37,731 generate_conf_files.py:107 INFO] Generated measurements/ entries for RTX4090x1_TRT/bert-99.9/Offline
- [2024-12-28 15:24:37,731 __init__.py:46 INFO] Running command: ./build/bin/harness_bert --logfile_outdir="/cm-mount/home/arjun/gh_action_results/valid_results/RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config/bert-99.9/offline/accuracy" --logfile_prefix="mlperf_log_" --performance_sample_count=10833 --test_mode="AccuracyOnly" --gpu_batch_size=256 --mlperf_conf_path="/home/cmuser/CM/repos/local/cache/85453ba5383a47d1/inference/mlperf.conf" --tensor_path="build/preprocessed_data/squad_tokenized/input_ids.npy,build/preprocessed_data/squad_tokenized/segment_ids.npy,build/preprocessed_data/squad_tokenized/input_mask.npy" --use_graphs=false --user_conf_path="/home/cmuser/CM/repos/mlcommons@mlperf-automations/script/generate-mlperf-inference-user-conf/tmp/b449747f5f5a4135a402e6c9015639c8.conf" --gpu_inference_streams=2 --gpu_copy_streams=2 --gpu_engines="./build/engines/RTX4090x1/bert/Offline/bert-Offline-gpu-fp16_S_384_B_256_P_2_vs.custom_k_99_9_MaxP.plan" --scenario Offline --model bert
- [2024-12-28 15:24:37,731 __init__.py:53 INFO] Overriding Environment
+ [2024-12-31 23:51:14,202 main.py:229 INFO] Detected system ID: KnownSystem.RTX4090x1
+ [2024-12-31 23:51:14,747 generate_conf_files.py:107 INFO] Generated measurements/ entries for RTX4090x1_TRT/bert-99.9/Offline
+ [2024-12-31 23:51:14,747 __init__.py:46 INFO] Running command: ./build/bin/harness_bert --logfile_outdir="/cm-mount/home/arjun/gh_action_results/valid_results/RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config/bert-99.9/offline/accuracy" --logfile_prefix="mlperf_log_" --performance_sample_count=10833 --test_mode="AccuracyOnly" --gpu_batch_size=256 --mlperf_conf_path="/home/cmuser/CM/repos/local/cache/551e61f86b914205/inference/mlperf.conf" --tensor_path="build/preprocessed_data/squad_tokenized/input_ids.npy,build/preprocessed_data/squad_tokenized/segment_ids.npy,build/preprocessed_data/squad_tokenized/input_mask.npy" --use_graphs=false --user_conf_path="/home/cmuser/CM/repos/mlcommons@mlperf-automations/script/generate-mlperf-inference-user-conf/tmp/4fb75fc446ab4ac1b7d16df1c26e1ed7.conf" --gpu_inference_streams=2 --gpu_copy_streams=2 --gpu_engines="./build/engines/RTX4090x1/bert/Offline/bert-Offline-gpu-fp16_S_384_B_256_P_2_vs.custom_k_99_9_MaxP.plan" --scenario Offline --model bert
+ [2024-12-31 23:51:14,747 __init__.py:53 INFO] Overriding Environment
benchmark : Benchmark.BERT
buffer_manager_thread_count : 0
coalesced_tensor : True
@@ -11,8 +11,8 @@ gpu_copy_streams : 2
gpu_inference_streams : 2
input_dtype : int32
input_format : linear
- log_dir : /home/cmuser/CM/repos/local/cache/ba8d5f2a6bc546f9/repo/closed/NVIDIA/build/logs/2024.12.28-15.24.32
- mlperf_conf_path : /home/cmuser/CM/repos/local/cache/85453ba5383a47d1/inference/mlperf.conf
+ log_dir : /home/cmuser/CM/repos/local/cache/ba8d5f2a6bc546f9/repo/closed/NVIDIA/build/logs/2024.12.31-23.51.03
+ mlperf_conf_path : /home/cmuser/CM/repos/local/cache/551e61f86b914205/inference/mlperf.conf
offline_expected_qps : 0.0
precision : fp16
preprocessed_data_dir : /home/cmuser/CM/repos/local/cache/a8c152aef5494496/preprocessed_data
@@ -21,7 +21,7 @@ system : SystemConfiguration(host_cpu_conf=CPUConfiguration(layout={CPU(name='AM
tensor_path : build/preprocessed_data/squad_tokenized/input_ids.npy,build/preprocessed_data/squad_tokenized/segment_ids.npy,build/preprocessed_data/squad_tokenized/input_mask.npy
test_mode : AccuracyOnly
use_graphs : False
- user_conf_path : /home/cmuser/CM/repos/mlcommons@mlperf-automations/script/generate-mlperf-inference-user-conf/tmp/b449747f5f5a4135a402e6c9015639c8.conf
+ user_conf_path : /home/cmuser/CM/repos/mlcommons@mlperf-automations/script/generate-mlperf-inference-user-conf/tmp/4fb75fc446ab4ac1b7d16df1c26e1ed7.conf
system_id : RTX4090x1
config_name : RTX4090x1_bert_Offline
workload_setting : WorkloadSetting(HarnessType.Custom, AccuracyTarget.k_99_9, PowerSetting.MaxP)
@@ -34,41 +34,41 @@ skip_file_checks : True
power_limit : None
cpu_freq : None
&&&& RUNNING BERT_HARNESS # ./build/bin/harness_bert
- I1228 15:24:37.834396 19732 main_bert.cc:163] Found 1 GPUs
- I1228 15:24:38.761662 19732 bert_server.cc:147] Engine Path: ./build/engines/RTX4090x1/bert/Offline/bert-Offline-gpu-fp16_S_384_B_256_P_2_vs.custom_k_99_9_MaxP.plan
- [I] [TRT] Loaded engine size: 699 MiB
- [I] [TRT] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +6, GPU +10, now: CPU 863, GPU 1518 (MiB)
- [I] [TRT] [MemUsageChange] Init cuDNN: CPU +2, GPU +10, now: CPU 865, GPU 1528 (MiB)
+ I1231 23:51:14.815665 19726 main_bert.cc:163] Found 1 GPUs
+ I1231 23:51:15.253394 19726 bert_server.cc:147] Engine Path: ./build/engines/RTX4090x1/bert/Offline/bert-Offline-gpu-fp16_S_384_B_256_P_2_vs.custom_k_99_9_MaxP.plan
+ [I] [TRT] Loaded engine size: 700 MiB
+ [I] [TRT] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +7, GPU +10, now: CPU 864, GPU 1518 (MiB)
+ [I] [TRT] [MemUsageChange] Init cuDNN: CPU +1, GPU +10, now: CPU 865, GPU 1528 (MiB)
[I] [TRT] [MemUsageChange] TensorRT-managed allocation in engine deserialization: CPU +0, GPU +576, now: CPU 0, GPU 576 (MiB)
- I1228 15:24:40.453316 19732 bert_server.cc:208] Engines Creation Completed
- I1228 15:24:40.507620 19732 bert_core_vs.cc:385] Engine - Device Memory requirements: 1409287680
- I1228 15:24:40.507630 19732 bert_core_vs.cc:393] Engine - Number of Optimization Profiles: 2
- I1228 15:24:40.507637 19732 bert_core_vs.cc:415] Engine - Profile 0 maxDims 98304 Bmax=256 Smax=384
+ I1231 23:51:19.299175 19726 bert_server.cc:208] Engines Creation Completed
+ I1231 23:51:19.348824 19726 bert_core_vs.cc:385] Engine - Device Memory requirements: 1409287680
+ I1231 23:51:19.348850 19726 bert_core_vs.cc:393] Engine - Number of Optimization Profiles: 2
+ I1231 23:51:19.348858 19726 bert_core_vs.cc:415] Engine - Profile 0 maxDims 98304 Bmax=256 Smax=384
[I] [TRT] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +0, GPU +8, now: CPU 166, GPU 2866 (MiB)
[I] [TRT] [MemUsageChange] Init cuDNN: CPU +0, GPU +8, now: CPU 166, GPU 2874 (MiB)
- I1228 15:24:40.577661 19732 bert_core_vs.cc:426] Setting Opt.Prof. to 0
+ I1231 23:51:19.440239 19726 bert_core_vs.cc:426] Setting Opt.Prof. to 0
[I] [TRT] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +1, GPU +0, now: CPU 1, GPU 576 (MiB)
- I1228 15:24:40.577692 19732 bert_core_vs.cc:444] Context creation complete. Max supported batchSize: 256
- I1228 15:24:40.579841 19732 bert_core_vs.cc:476] Setup complete
- I1228 15:24:40.580044 19732 bert_core_vs.cc:385] Engine - Device Memory requirements: 1409287680
- I1228 15:24:40.580049 19732 bert_core_vs.cc:393] Engine - Number of Optimization Profiles: 2
- I1228 15:24:40.580052 19732 bert_core_vs.cc:415] Engine - Profile 1 maxDims 98304 Bmax=256 Smax=384
+ I1231 23:51:19.440282 19726 bert_core_vs.cc:444] Context creation complete. Max supported batchSize: 256
+ I1231 23:51:19.442037 19726 bert_core_vs.cc:476] Setup complete
+ I1231 23:51:19.444537 19726 bert_core_vs.cc:385] Engine - Device Memory requirements: 1409287680
+ I1231 23:51:19.444542 19726 bert_core_vs.cc:393] Engine - Number of Optimization Profiles: 2
+ I1231 23:51:19.444546 19726 bert_core_vs.cc:415] Engine - Profile 1 maxDims 98304 Bmax=256 Smax=384
[I] [TRT] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +0, GPU +8, now: CPU 289, GPU 4352 (MiB)
- [I] [TRT] [MemUsageChange] Init cuDNN: CPU +0, GPU +10, now: CPU 289, GPU 4362 (MiB)
+ [I] [TRT] [MemUsageChange] Init cuDNN: CPU +1, GPU +10, now: CPU 290, GPU 4362 (MiB)
+ I1231 23:51:19.721244 19726 bert_core_vs.cc:426] Setting Opt.Prof. to 1
[I] [TRT] Could not set default profile 0 for execution context. Profile index must be set explicitly.
- I1228 15:24:40.651274 19732 bert_core_vs.cc:426] Setting Opt.Prof. to 1
[I] [TRT] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +0, now: CPU 1, GPU 576 (MiB)
- I1228 15:24:40.651684 19732 bert_core_vs.cc:444] Context creation complete. Max supported batchSize: 256
- I1228 15:24:40.652937 19732 bert_core_vs.cc:476] Setup complete
- I1228 15:24:41.248067 19732 main_bert.cc:184] Starting running actual test.
- I1228 15:24:47.760198 19732 main_bert.cc:190] Finished running actual test.
+ I1231 23:51:19.721933 19726 bert_core_vs.cc:444] Context creation complete. Max supported batchSize: 256
+ I1231 23:51:19.723507 19726 bert_core_vs.cc:476] Setup complete
+ I1231 23:51:20.460515 19726 main_bert.cc:184] Starting running actual test.
+ I1231 23:51:41.768021 19726 main_bert.cc:190] Finished running actual test.

- No warnings encountered during test.
+ 3797 warnings encountered. See detailed log.

No errors encountered during test.
- [2024-12-28 15:24:48,231 run_harness.py:166 INFO] Result: Accuracy run detected.
- [2024-12-28 15:24:48,231 __init__.py:46 INFO] Running command: PYTHONPATH=code/bert/tensorrt/helpers python3 /home/cmuser/CM/repos/local/cache/ba8d5f2a6bc546f9/repo/closed/NVIDIA/build/inference/language/bert/accuracy-squad.py --log_file /cm-mount/home/arjun/gh_action_results/valid_results/RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config/bert-99.9/offline/accuracy/mlperf_log_accuracy.json --vocab_file build/models/bert/vocab.txt --val_data /home/cmuser/CM/repos/local/cache/a8c152aef5494496/data/squad/dev-v1.1.json --out_file /cm-mount/home/arjun/gh_action_results/valid_results/RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config/bert-99.9/offline/accuracy/predictions.json --output_dtype float16
- {"exact_match": 83.6802270577105, "f1": 90.88066528372401}
+ [2024-12-31 23:51:42,156 run_harness.py:166 INFO] Result: Accuracy run detected.
+ [2024-12-31 23:51:42,156 __init__.py:46 INFO] Running command: PYTHONPATH=code/bert/tensorrt/helpers python3 /home/cmuser/CM/repos/local/cache/ba8d5f2a6bc546f9/repo/closed/NVIDIA/build/inference/language/bert/accuracy-squad.py --log_file /cm-mount/home/arjun/gh_action_results/valid_results/RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config/bert-99.9/offline/accuracy/mlperf_log_accuracy.json --vocab_file build/models/bert/vocab.txt --val_data /home/cmuser/CM/repos/local/cache/a8c152aef5494496/data/squad/dev-v1.1.json --out_file /cm-mount/home/arjun/gh_action_results/valid_results/RTX4090x1-nvidia_original-gpu-tensorrt-vdefault-default_config/bert-99.9/offline/accuracy/predictions.json --output_dtype float16
+ {"exact_match": 83.67076631977294, "f1": 90.8832407068292}
Reading examples...
Loading cached features from 'eval_features.pickle'...
Loading LoadGen logs...
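*Aside: the `{"exact_match": ..., "f1": ...}` summary that accuracy-squad.py prints can be pulled out of a console log like the one above with a short script. This is a sketch assuming the summary appears as a single JSON object on one line; the log path is a hypothetical placeholder:*

```python
import json
import re

def extract_squad_metrics(log_path: str) -> dict:
    """Return the first SQuAD accuracy summary found in a harness console log."""
    # Matches e.g. {"exact_match": 83.67076631977294, "f1": 90.8832407068292}
    pattern = re.compile(r'\{"exact_match": [0-9.]+, "f1": [0-9.]+\}')
    with open(log_path) as f:
        for line in f:
            match = pattern.search(line)
            if match:
                return json.loads(match.group(0))
    raise ValueError("no accuracy summary found in log")

# Hypothetical log path for illustration only.
print(extract_squad_metrics("accuracy_console.log"))
```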